-rw-r--r--  MAINTAINERS                                 |   6
-rw-r--r--  arch/powerpc/include/asm/opal.h             |   4
-rw-r--r--  arch/powerpc/kernel/crash_dump.c            |   6
-rw-r--r--  arch/powerpc/kernel/ptrace.c                |   4
-rw-r--r--  arch/powerpc/kernel/setup-common.c          |   4
-rw-r--r--  arch/powerpc/kernel/smp.c                   |   4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-lpc.c   |  12
-rw-r--r--  arch/powerpc/platforms/powernv/opal-xscom.c |   4
-rw-r--r--  arch/powerpc/platforms/pseries/lparcfg.c    |  12
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c        |  28
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c      |  46
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c        |   8
-rw-r--r--  arch/x86/Kconfig                            |   1
-rw-r--r--  arch/x86/include/asm/preempt.h              |  11
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h            |  15
-rw-r--r--  drivers/clocksource/Kconfig                 |   1
-rw-r--r--  drivers/clocksource/clksrc-of.c             |   1
-rw-r--r--  drivers/clocksource/dw_apb_timer_of.c       |   7
-rw-r--r--  drivers/clocksource/sun4i_timer.c           |   3
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c    |  10
-rw-r--r--  drivers/gpio/gpio-msm-v2.c                  |   4
-rw-r--r--  drivers/gpio/gpio-rcar.c                    |   3
-rw-r--r--  drivers/gpio/gpio-twl4030.c                 |  15
-rw-r--r--  drivers/pinctrl/sh-pfc/sh_pfc.h             |   2
-rw-r--r--  fs/ceph/addr.c                              |   8
-rw-r--r--  fs/ceph/inode.c                             | 136
-rw-r--r--  include/asm-generic/preempt.h               |  35
-rw-r--r--  include/linux/math64.h                      |  30
-rw-r--r--  include/linux/sched.h                       |   5
-rw-r--r--  init/Kconfig                                |   6
-rw-r--r--  kernel/sched/core.c                         |   1
-rw-r--r--  kernel/sched/fair.c                         | 144
-rw-r--r--  tools/power/cpupower/utils/cpupower-set.c   |   6
33 files changed, 307 insertions(+), 275 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 1344816c4c06..75e3db96dec5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3761,9 +3761,11 @@ F: include/uapi/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-S:	Maintained
+M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
-F:	Documentation/gpio.txt
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S:	Maintained
+F:	Documentation/gpio/
 F:	drivers/gpio/
 F:	include/linux/gpio*
 F:	include/asm-generic/gpio.h
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 033c06be1d84..7bdcf340016c 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 		       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-		      uint32_t addr, uint32_t *data, uint32_t sz);
+		      uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c26435..11c1d069d920 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
-	const u32 *basep, *sizep;
+	const __be32 *basep, *sizep;
 	unsigned int rtas_start = 0, rtas_end = 0;
 
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 	if (basep && sizep) {
-		rtas_start = *basep;
-		rtas_end = *basep + *sizep;
+		rtas_start = be32_to_cpup(basep);
+		rtas_end = rtas_start + be32_to_cpup(sizep);
 	}
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb40498b41..2e3d2bf536c5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&tmp, &child->thread.fp_state.fpr,
+				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
 				       sizeof(long));
 			else
 				tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&child->thread.fp_state.fpr, &data,
+				memcpy(&child->thread.TS_FPR(fpidx), &data,
 				       sizeof(long));
 			else
 				child->thread.fp_state.fpscr = data;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc80445d25..bc76cc6b419c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 	    (dn = of_find_node_by_path("/rtas"))) {
 		int num_addr_cell, num_size_cell, maxcpus;
-		const unsigned int *ireg;
+		const __be32 *ireg;
 
 		num_addr_cell = of_n_addr_cells(dn);
 		num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
 		if (!ireg)
 			goto out;
 
-		maxcpus = ireg[num_addr_cell + num_size_cell];
+		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a3b64f3bf9a2..c1cf4a1522d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 {
 	struct device_node *np;
-	const int *reg;
+	const __be32 *reg;
 	int id = -1;
 
 	np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
 	if (!reg)
 		goto out;
 
-	id = *reg;
+	id = be32_to_cpup(reg);
 out:
 	of_node_put(np);
 	return id;
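A note on the powerpc hunks above and below: flattened device-tree cells are always stored big-endian, so a raw dereference is only correct on a big-endian CPU; be32_to_cpup() makes the decode explicit. A minimal user-space sketch of the same decode (illustrative names, not the kernel API):

#include <stdint.h>

/* Decode one big-endian 32-bit cell, most-significant byte first. */
static uint32_t be32_decode_sketch(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}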
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index e7e59e4f9892..79d83cad3d67 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
 static u8 opal_lpc_inb(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xffff)
 		return 0xff;
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
-	return rc ? 0xff : data;
+	return rc ? 0xff : be32_to_cpu(data);
 }
 
 static __le16 __opal_lpc_inw(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffe)
 		return 0xffff;
 	if (port & 1)
 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
-	return rc ? 0xffff : data;
+	return rc ? 0xffff : be32_to_cpu(data);
 }
 static u16 opal_lpc_inw(unsigned long port)
 {
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffc)
 		return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
 		       (__le32)opal_lpc_inb(port + 2) << 8 |
 		       opal_lpc_inb(port + 3);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
-	return rc ? 0xffffffff : data;
+	return rc ? 0xffffffff : be32_to_cpu(data);
 }
 
 static u32 opal_lpc_inl(unsigned long port)
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4d99a8fd55ac..4fbf276ac99e 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 {
 	struct opal_scom_map *m = map;
 	int64_t rc;
+	__be64 v;
 
 	reg = opal_scom_unmangle(reg);
-	rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+	*value = be64_to_cpu(v);
 	return opal_xscom_err_xlate(rc);
 }
 
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e738007eae64..c9fecf09b8fa 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
 {
 	struct hvcall_ppp_data ppp_data;
 	struct device_node *root;
-	const int *perf_level;
+	const __be32 *perf_level;
 	int rc;
 
 	rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
 	perf_level = of_get_property(root,
 			"ibm,partition-performance-parameters-level",
 			NULL);
-	if (perf_level && (*perf_level >= 1)) {
+	if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
 		seq_printf(m,
 		    "physical_procs_allocated_to_virtualization=%d\n",
 			   ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	int partition_potential_processors;
 	int partition_active_processors;
 	struct device_node *rtas_node;
-	const int *lrdrp = NULL;
+	const __be32 *lrdrp = NULL;
 
 	rtas_node = of_find_node_by_path("/rtas");
 	if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	if (lrdrp == NULL) {
 		partition_potential_processors = vdso_data->processorCount;
 	} else {
-		partition_potential_processors = *(lrdrp + 4);
+		partition_potential_processors = be32_to_cpup(lrdrp + 4);
 	}
 	of_node_put(rtas_node);
 
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	const char *model = "";
 	const char *system_id = "";
 	const char *tmp;
-	const unsigned int *lp_index_ptr;
+	const __be32 *lp_index_ptr;
 	unsigned int lp_index = 0;
 
 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
 					NULL);
 		if (lp_index_ptr)
-			lp_index = *lp_index_ptr;
+			lp_index = be32_to_cpup(lp_index_ptr);
 		of_node_put(rootdn);
 	}
 	seq_printf(m, "serial_number=%s\n", system_id);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 6d2f0abce6fa..0c882e83c4ce 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 {
 	struct device_node *dn;
 	struct pci_dn *pdn;
-	const u32 *req_msi;
+	const __be32 *p;
+	u32 req_msi;
 
 	pdn = pci_get_pdn(pdev);
 	if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 
 	dn = pdn->node;
 
-	req_msi = of_get_property(dn, prop_name, NULL);
-	if (!req_msi) {
+	p = of_get_property(dn, prop_name, NULL);
+	if (!p) {
 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
 		return -ENOENT;
 	}
 
-	if (*req_msi < nvec) {
+	req_msi = be32_to_cpup(p);
+	if (req_msi < nvec) {
 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 
-		if (*req_msi == 0) /* Be paranoid */
+		if (req_msi == 0) /* Be paranoid */
 			return -ENOSPC;
 
-		return *req_msi;
+		return req_msi;
 	}
 
 	return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 {
 	struct device_node *dn;
-	const u32 *p;
+	const __be32 *p;
 
 	dn = of_node_get(pci_device_to_OF_node(dev));
 	while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 		if (p) {
 			pr_debug("rtas_msi: found prop on dn %s\n",
 				dn->full_name);
-			*total = *p;
+			*total = be32_to_cpup(p);
 			return dn;
 		}
 
@@ -232,13 +234,13 @@ struct msi_counts {
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 {
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	u32 class;
 
 	pr_debug("rtas_msi: counting %s\n", dn->full_name);
 
 	p = of_get_property(dn, "class-code", NULL);
-	class = p ? *p : 0;
+	class = p ? be32_to_cpup(p) : 0;
 
 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
 		counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 {
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	int req;
 
 	if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
 		req = 0;
 		p = of_get_property(dn, "ibm,req#msi", NULL);
 		if (p)
-			req = *p;
+			req = be32_to_cpup(p);
 
 		p = of_get_property(dn, "ibm,req#msi-x", NULL);
 		if (p)
-			req = max(req, (int)*p);
+			req = max(req, (int)be32_to_cpup(p));
 	}
 
 	if (req < counts->quota)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 7bfaf58d4664..d7096f2f7751 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
 struct err_log_info {
-	int error_type;
-	unsigned int seq_num;
+	__be32 error_type;
+	__be32 seq_num;
 };
 
 struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 struct oops_log_info {
-	u16 version;
-	u16 report_length;
-	u64 timestamp;
+	__be16 version;
+	__be16 report_length;
+	__be64 timestamp;
 } __attribute__((packed));
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
 		length = part->size;
 	}
 
-	info.error_type = err_type;
-	info.seq_num = error_log_cnt;
+	info.error_type = cpu_to_be32(err_type);
+	info.seq_num = cpu_to_be32(error_log_cnt);
 
 	tmp_index = part->index;
 
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
 	}
 
 	if (part->os_partition) {
-		*error_log_cnt = info.seq_num;
-		*err_type = info.error_type;
+		*error_log_cnt = be32_to_cpu(info.seq_num);
+		*err_type = be32_to_cpu(info.error_type);
 	}
 
 	return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
 		pr_err("nvram: logging uncompressed oops/panic report\n");
 		return -1;
 	}
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) zipped_len;
-	oops_hdr->timestamp = get_seconds();
+	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+	oops_hdr->report_length = cpu_to_be16(zipped_len);
+	oops_hdr->timestamp = cpu_to_be64(get_seconds());
 	return 0;
 }
 
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
 	    clobbering_unread_rtas_event())
 		return -1;
 
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) size;
-	oops_hdr->timestamp = get_seconds();
+	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+	oops_hdr->report_length = cpu_to_be16(size);
+	oops_hdr->timestamp = cpu_to_be64(get_seconds());
 
 	if (compressed)
 		err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 	size_t length, hdr_size;
 
 	oops_hdr = (struct oops_log_info *)buff;
-	if (oops_hdr->version < OOPS_HDR_VERSION) {
+	if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
 		/* Old format oops header had 2-byte record size */
 		hdr_size = sizeof(u16);
-		length = oops_hdr->version;
+		length = be16_to_cpu(oops_hdr->version);
 		time->tv_sec = 0;
 		time->tv_nsec = 0;
 	} else {
 		hdr_size = sizeof(*oops_hdr);
-		length = oops_hdr->report_length;
-		time->tv_sec = oops_hdr->timestamp;
+		length = be16_to_cpu(oops_hdr->report_length);
+		time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
 		time->tv_nsec = 0;
 	}
 	*buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 		kmsg_dump_get_buffer(dumper, false,
 				     oops_data, oops_data_sz, &text_len);
 		err_type = ERR_TYPE_KERNEL_PANIC;
-		oops_hdr->version = OOPS_HDR_VERSION;
-		oops_hdr->report_length = (u16) text_len;
-		oops_hdr->timestamp = get_seconds();
+		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+		oops_hdr->report_length = cpu_to_be16(text_len);
+		oops_hdr->timestamp = cpu_to_be64(get_seconds());
 	}
 
 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
-		(int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+		(int) (sizeof(*oops_hdr) + text_len), err_type,
 		++oops_count);
 
 	spin_unlock_irqrestore(&lock, flags);
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 5f93856cdf47..70670a2d9cf2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
 	struct device_node *dn, *pdn;
 	struct pci_bus *bus;
-	const uint32_t *pcie_link_speed_stats;
+	const __be32 *pcie_link_speed_stats;
 
 	bus = bridge->bus;
 
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		return 0;
 
 	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
-		pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+		pcie_link_speed_stats = of_get_property(pdn,
 			"ibm,pcie-link-speed-stats", NULL);
 		if (pcie_link_speed_stats)
 			break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		return 0;
 	}
 
-	switch (pcie_link_speed_stats[0]) {
+	switch (be32_to_cpup(pcie_link_speed_stats)) {
 	case 0x01:
 		bus->max_bus_speed = PCIE_SPEED_2_5GT;
 		break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		break;
 	}
 
-	switch (pcie_link_speed_stats[1]) {
+	switch (be32_to_cpup(pcie_link_speed_stats)) {
 	case 0x01:
 		bus->cur_bus_speed = PCIE_SPEED_2_5GT;
 		break;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e903c71f7e69..0952ecd60eca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
 	select HAVE_AOUT if X86_32
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select HAVE_IDE
 	select HAVE_OPROFILE
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723636fd..c8b051933b1b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
 DECLARE_PER_CPU(int, __preempt_count);
 
 /*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+/*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
  */
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
 	__this_cpu_add_4(__preempt_count, -val);
 }
 
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fd00bb29425d..c1a861829d81 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -262,11 +262,20 @@ struct cpu_hw_events {
 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
 
-#define EVENT_CONSTRAINT_END		\
-	EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
 
+/*
+ * Check for end marker with weight == -1
+ */
 #define for_each_event_constraint(e, c)	\
-	for ((e) = (c); (e)->weight; (e)++)
+	for ((e) = (c); (e)->weight != -1; (e)++)
 
 /*
  * Extra registers for specific events.
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5c07a56962db..634c4d6dd45a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 config CLKSRC_EFM32
 	bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
 	depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+	select CLKSRC_MMIO
 	default ARCH_EFM32
 	help
 	  Support to use the timers of EFM32 SoCs as clock source and clock
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
 
 		init_func = match->data;
 		init_func(np);
-		of_node_put(np);
 	}
 }
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
 
 static u64 read_sched_clock(void)
 {
-	return __raw_readl(sched_io_base);
+	return ~__raw_readl(sched_io_base);
 }
 
 static const struct of_device_id sptimer_ids[] __initconst = {
 	{ .compatible = "picochip,pc3x2-rtc" },
-	{ .compatible = "snps,dw-apb-timer-sp" },
 	{ /* Sentinel */ },
 };
 
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
 	num_called++;
 }
 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..a4f6119aafd8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
 	       timer_base + TIMER_CTL_REG(0));
 
+	/* Make sure timer is stopped before playing with interrupts */
+	sun4i_clkevt_time_stop(0);
+
 	ret = setup_irq(irq, &sun4i_timer_irq);
 	if (ret)
 		pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..4e7f6802e840 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
 	/*
-	 * Set scale and timer for sched_clock.
-	 */
-	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
-	/*
 	 * Setup free-running clocksource timer (interrupts
 	 * disabled).
 	 */
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 	timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
 		TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 
+	/*
+	 * Set scale and timer for sched_clock.
+	 */
+	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
 	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
 			      "armada_370_xp_clocksource",
 			      timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 7b37300973db..2baf0ddf7e02 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
-	clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+	clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
 	__clear_bit(gpio, msm_gpio.enabled_irqs);
 	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	__set_bit(gpio, msm_gpio.enabled_irqs);
-	set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+	set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
 	writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
 	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fe088a30567a..8b7e719a68c3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 	u32 pending;
 	unsigned int offset, irqs_handled = 0;
 
-	while ((pending = gpio_rcar_read(p, INTDT))) {
+	while ((pending = gpio_rcar_read(p, INTDT) &
+			  gpio_rcar_read(p, INTMSK))) {
 		offset = __ffs(pending);
 		gpio_rcar_write(p, INTCLR, BIT(offset));
 		generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index b97d6a6577b9..f9996899c1f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
 	if (offset < TWL4030_GPIO_MAX)
 		ret = twl4030_set_gpio_direction(offset, 1);
 	else
-		ret = -EINVAL;
+		ret = -EINVAL;	/* LED outputs can't be set as input */
 
 	if (!ret)
 		priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
 static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
-	int ret = -EINVAL;
+	int ret = 0;
 
 	mutex_lock(&priv->mutex);
-	if (offset < TWL4030_GPIO_MAX)
+	if (offset < TWL4030_GPIO_MAX) {
 		ret = twl4030_set_gpio_direction(offset, 0);
+		if (ret) {
+			mutex_unlock(&priv->mutex);
+			return ret;
+		}
+	}
+
+	/*
+	 * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
+	 */
 
 	priv->direction |= BIT(offset);
 	mutex_unlock(&priv->mutex);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..e2142956a8e5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
 #define PINMUX_GPIO(_pin)						\
 	[GPIO_##_pin] = {						\
 		.pin = (u16)-1,						\
-		.name = __stringify(name),				\
+		.name = __stringify(GPIO_##_pin),			\
 		.enum_id = _pin##_DATA,					\
 	}
 
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e561c059539..ec3ba43b9faa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 	if (err < 0) {
 		SetPageError(page);
 		goto out;
-	} else if (err < PAGE_CACHE_SIZE) {
+	} else {
+		if (err < PAGE_CACHE_SIZE) {
 		/* zero fill remainder of page */
 		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		} else {
+			flush_dcache_page(page);
+		}
 	}
 	SetPageUptodate(page);
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9a8e396aed89..278fd2891288 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 	struct ceph_mds_reply_inode *ininfo;
 	struct ceph_vino vino;
 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
-	int i = 0;
 	int err = 0;
 
 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		}
 	}
 
+	if (rinfo->head->is_target) {
+		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+		in = ceph_get_inode(sb, vino);
+		if (IS_ERR(in)) {
+			err = PTR_ERR(in);
+			goto done;
+		}
+		req->r_target_inode = in;
+
+		err = fill_inode(in, &rinfo->targeti, NULL,
+				session, req->r_request_started,
+				(le32_to_cpu(rinfo->head->result) == 0) ?
+				req->r_fmode : -1,
+				&req->r_caps_reservation);
+		if (err < 0) {
+			pr_err("fill_inode badness %p %llx.%llx\n",
+				in, ceph_vinop(in));
+			goto done;
+		}
+	}
+
 	/*
 	 * ignore null lease/binding on snapdir ENOENT, or else we
 	 * will have trouble splicing in the virtual snapdir later
@@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 			     ceph_dentry(req->r_old_dentry)->offset);
 
 		dn = req->r_old_dentry;  /* use old_dentry */
-		in = dn->d_inode;
 	}
 
 	/* null dentry? */
@@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		}
 
 		/* attach proper inode */
-		ininfo = rinfo->targeti.in;
-		vino.ino = le64_to_cpu(ininfo->ino);
-		vino.snap = le64_to_cpu(ininfo->snapid);
-		in = dn->d_inode;
-		if (!in) {
-			in = ceph_get_inode(sb, vino);
-			if (IS_ERR(in)) {
-				pr_err("fill_trace bad get_inode "
-				       "%llx.%llx\n", vino.ino, vino.snap);
-				err = PTR_ERR(in);
-				d_drop(dn);
-				goto done;
-			}
+		if (!dn->d_inode) {
+			ihold(in);
 			dn = splice_dentry(dn, in, &have_lease, true);
 			if (IS_ERR(dn)) {
 				err = PTR_ERR(dn);
 				goto done;
 			}
 			req->r_dentry = dn;  /* may have spliced */
-			ihold(in);
-		} else if (ceph_ino(in) == vino.ino &&
-			   ceph_snap(in) == vino.snap) {
-			ihold(in);
-		} else {
+		} else if (dn->d_inode && dn->d_inode != in) {
 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
-			     dn, in, ceph_ino(in), ceph_snap(in),
-			     vino.ino, vino.snap);
+			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
+			     ceph_vinop(in));
 			have_lease = false;
-			in = NULL;
 		}
 
 		if (have_lease)
 			update_dentry_lease(dn, rinfo->dlease, session,
 					    req->r_request_started);
 		dout(" final dn %p\n", dn);
-		i++;
-	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
-		   req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
+	} else if (!req->r_aborted &&
+		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
 		struct dentry *dn = req->r_dentry;
 
 		/* fill out a snapdir LOOKUPSNAP dentry */
@@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		ininfo = rinfo->targeti.in;
 		vino.ino = le64_to_cpu(ininfo->ino);
 		vino.snap = le64_to_cpu(ininfo->snapid);
-		in = ceph_get_inode(sb, vino);
-		if (IS_ERR(in)) {
-			pr_err("fill_inode get_inode badness %llx.%llx\n",
-			       vino.ino, vino.snap);
-			err = PTR_ERR(in);
-			d_delete(dn);
-			goto done;
-		}
 		dout(" linking snapped dir %p to dn %p\n", in, dn);
+		ihold(in);
 		dn = splice_dentry(dn, in, NULL, true);
 		if (IS_ERR(dn)) {
 			err = PTR_ERR(dn);
 			goto done;
 		}
 		req->r_dentry = dn;  /* may have spliced */
-		ihold(in);
-		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
-	}
-
-	if (rinfo->head->is_target) {
-		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
-		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
-
-		if (in == NULL || ceph_ino(in) != vino.ino ||
-		    ceph_snap(in) != vino.snap) {
-			in = ceph_get_inode(sb, vino);
-			if (IS_ERR(in)) {
-				err = PTR_ERR(in);
-				goto done;
-			}
-		}
-		req->r_target_inode = in;
-
-		err = fill_inode(in,
-				 &rinfo->targeti, NULL,
-				 session, req->r_request_started,
-				 (le32_to_cpu(rinfo->head->result) == 0) ?
-				 req->r_fmode : -1,
-				 &req->r_caps_reservation);
-		if (err < 0) {
-			pr_err("fill_inode badness %p %llx.%llx\n",
-			       in, ceph_vinop(in));
-			goto done;
-		}
 	}
-
 done:
 	dout("fill_trace done err=%d\n", err);
 	return err;
@@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 	struct qstr dname;
 	struct dentry *dn;
 	struct inode *in;
-	int err = 0, i;
+	int err = 0, ret, i;
 	struct inode *snapdir = NULL;
 	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
 	struct ceph_dentry_info *di;
@@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 		ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
 	}
 
+	/* FIXME: release caps/leases if error occurs */
 	for (i = 0; i < rinfo->dir_nr; i++) {
 		struct ceph_vino vino;
 
@@ -1329,9 +1298,10 @@ retry_lookup:
 				err = -ENOMEM;
 				goto out;
 			}
-			err = ceph_init_dentry(dn);
-			if (err < 0) {
+			ret = ceph_init_dentry(dn);
+			if (ret < 0) {
 				dput(dn);
+				err = ret;
 				goto out;
 			}
 		} else if (dn->d_inode &&
@@ -1351,9 +1321,6 @@ retry_lookup:
 			spin_unlock(&parent->d_lock);
 		}
 
-		di = dn->d_fsdata;
-		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
-
 		/* inode */
 		if (dn->d_inode) {
 			in = dn->d_inode;
@@ -1366,26 +1333,39 @@ retry_lookup:
 				err = PTR_ERR(in);
 				goto out;
 			}
-			dn = splice_dentry(dn, in, NULL, false);
-			if (IS_ERR(dn))
-				dn = NULL;
 		}
 
 		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
 			       req->r_request_started, -1,
 			       &req->r_caps_reservation) < 0) {
 			pr_err("fill_inode badness on %p\n", in);
+			if (!dn->d_inode)
+				iput(in);
+			d_drop(dn);
 			goto next_item;
 		}
-		if (dn)
-			update_dentry_lease(dn, rinfo->dir_dlease[i],
-					    req->r_session,
-					    req->r_request_started);
+
+		if (!dn->d_inode) {
+			dn = splice_dentry(dn, in, NULL, false);
+			if (IS_ERR(dn)) {
+				err = PTR_ERR(dn);
+				dn = NULL;
+				goto next_item;
+			}
+		}
+
+		di = dn->d_fsdata;
+		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+
+		update_dentry_lease(dn, rinfo->dir_dlease[i],
+				    req->r_session,
+				    req->r_request_started);
 next_item:
 		if (dn)
 			dput(dn);
 	}
-	req->r_did_prepopulate = true;
+	if (err == 0)
+		req->r_did_prepopulate = true;
 
 out:
 	if (snapdir) {
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
 
 #include <linux/thread_info.h>
 
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED	(0)
+
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+	return current_thread_info()->preempt_count;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
 	return &current_thread_info()->preempt_count;
 }
 
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
 } while (0)
 
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+	return false;
 }
 
 /*
67 49
68/* 50/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--*preempt_count_ptr();
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
 }
 
 /*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!*preempt_count_ptr());
+	return unlikely(!preempt_count() && tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	u32 ah, al;
+	u64 ret;
+
+	al = a;
+	ah = a >> 32;
+
+	ret = ((u64)al * mul) >> shift;
+	if (ah)
+		ret += ((u64)ah * mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
 #endif /* _LINUX_MATH64_H */
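The fallback branch above relies on splitting a into 32-bit halves, a = ah*2^32 + al, so that for 0 <= shift <= 32 each partial product fits in 64 bits:

  (a * mul) >> shift == ((al * mul) >> shift) + ((ah * mul) << (32 - shift))

(truncation of the low term only drops bits below the shift point). A quick self-contained check of that identity (a sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

static uint64_t mul_shr_sketch(uint64_t a, uint32_t mul, unsigned int shift)
{
	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint64_t ret = ((uint64_t)al * mul) >> shift;

	if (ah)
		ret += ((uint64_t)ah * mul) << (32 - shift);
	return ret;
}

int main(void)
{
	/* (2^32 * 3) >> 1 == 3 * 2^31 */
	assert(mul_shr_sketch(1ULL << 32, 3, 1) == 3ULL << 31);
	return 0;
}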
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 768b037dfacb..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
 		.sum_exec_runtime = 0,				\
 	}
 
-#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
-
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
 #else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
 struct uts_namespace;
 
 struct load_weight {
-	unsigned long weight, inv_weight;
+	unsigned long weight;
+	u32 inv_weight;
 };
 
 struct sched_avg {
diff --git a/init/Kconfig b/init/Kconfig
index 79383d3aa5dc..4e5d96ab2034 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK
 config ARCH_SUPPORTS_NUMA_BALANCING
 	bool
 
+#
+# For architectures that know their GCC __int128 support is sound
+#
+config ARCH_SUPPORTS_INT128
+	bool
+
 # For architectures that (ab)use NUMA to represent different memory regions
 # all cpu-local but of different latencies, such as SuperH.
 #
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e85cda20ab2b..19af58f3a261 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5112,6 +5112,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 * die on a /0 trap.
 		 */
 		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+		sg->sgp->power_orig = sg->sgp->power;
 
 		/*
 		 * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fd773ade1a31..9030da7bcb15 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
 	update_sysctl();
 }
 
-#if BITS_PER_LONG == 32
-# define WMULT_CONST	(~0UL)
-#else
-# define WMULT_CONST	(1UL << 32)
-#endif
-
+#define WMULT_CONST	(~0U)
 #define WMULT_SHIFT	32
 
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+static void __update_inv_weight(struct load_weight *lw)
+{
+	unsigned long w;
+
+	if (likely(lw->inv_weight))
+		return;
+
+	w = scale_load_down(lw->weight);
+
+	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+		lw->inv_weight = 1;
+	else if (unlikely(!w))
+		lw->inv_weight = WMULT_CONST;
+	else
+		lw->inv_weight = WMULT_CONST / w;
+}
 
 /*
- * delta *= weight / lw
+ * delta_exec * weight / lw.weight
+ *   OR
+ * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ *
+ * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * we're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
+ *
+ * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
+ * weight/lw.weight <= 1, and therefore our shift will also be positive.
  */
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
-		struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
 {
-	u64 tmp;
+	u64 fact = scale_load_down(weight);
+	int shift = WMULT_SHIFT;
 
-	/*
-	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
-	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
-	 * 2^SCHED_LOAD_RESOLUTION.
-	 */
-	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
-		tmp = (u64)delta_exec * scale_load_down(weight);
-	else
-		tmp = (u64)delta_exec;
+	__update_inv_weight(lw);
 
-	if (!lw->inv_weight) {
-		unsigned long w = scale_load_down(lw->weight);
-
-		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
-			lw->inv_weight = 1;
-		else if (unlikely(!w))
-			lw->inv_weight = WMULT_CONST;
-		else
-			lw->inv_weight = WMULT_CONST / w;
+	if (unlikely(fact >> 32)) {
+		while (fact >> 32) {
+			fact >>= 1;
+			shift--;
+		}
 	}
 
-	/*
-	 * Check whether we'd overflow the 64-bit multiplication:
-	 */
-	if (unlikely(tmp > WMULT_CONST))
-		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
-			WMULT_SHIFT/2);
-	else
-		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+	/* hint to use a 32x32->64 mul */
+	fact = (u64)(u32)fact * lw->inv_weight;
+
+	while (fact >> 32) {
+		fact >>= 1;
+		shift--;
+	}
 
-	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+	return mul_u64_u32_shr(delta_exec, fact, shift);
 }
 
 
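A worked userspace re-run of the new __calc_delta() steps makes the shift bookkeeping concrete (made-up weights; scale_load_down() is assumed to be a no-op at this load resolution, and the plain 64-bit multiply at the end stands in for mul_u64_u32_shr() only because these particular numbers cannot overflow):

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32

int main(void)
{
	uint64_t delta_exec = 1000000;			/* 1ms in ns */
	uint64_t fact = 3072;				/* scale_load_down(weight) */
	uint32_t inv_weight = WMULT_CONST / 1024;	/* lw->weight = 1024 */
	int shift = WMULT_SHIFT;

	/* the 32x32->64 multiply: 3072 * 4194303 = 12884898816 >= 2^32 */
	fact = (uint64_t)(uint32_t)fact * inv_weight;

	/* normalize: two iterations drop fact below 2^32, shift becomes 30 */
	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	/* prints 2999999: a weight-3072 entity gets ~3x a weight-1024 one */
	printf("%llu\n", (unsigned long long)((delta_exec * fact) >> shift));
	return 0;
}
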
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
 /*
  * delta /= w
  */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
 {
 	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
 
 	return delta;
 }
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			update_load_add(&lw, se->load.weight);
 			load = &lw;
 		}
-		slice = calc_delta_mine(slice, se->load.weight, load);
+		slice = __calc_delta(slice, se->load.weight, load);
 	}
 	return slice;
 }
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
 #endif
 
 /*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
+ * Update the current task's runtime statistics.
  */
-static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
-	      unsigned long delta_exec)
-{
-	unsigned long delta_exec_weighted;
-
-	schedstat_set(curr->statistics.exec_max,
-			max((u64)delta_exec, curr->statistics.exec_max));
-
-	curr->sum_exec_runtime += delta_exec;
-	schedstat_add(cfs_rq, exec_clock, delta_exec);
-	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
-	curr->vruntime += delta_exec_weighted;
-	update_min_vruntime(cfs_rq);
-}
-
 static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq->curr;
 	u64 now = rq_clock_task(rq_of(cfs_rq));
-	unsigned long delta_exec;
+	u64 delta_exec;
 
 	if (unlikely(!curr))
 		return;
 
-	/*
-	 * Get the amount of time the current task was running
-	 * since the last time we changed load (this cannot
-	 * overflow on 32 bits):
-	 */
-	delta_exec = (unsigned long)(now - curr->exec_start);
-	if (!delta_exec)
+	delta_exec = now - curr->exec_start;
+	if (unlikely((s64)delta_exec <= 0))
 		return;
 
-	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
 
+	schedstat_set(curr->statistics.exec_max,
+		      max(delta_exec, curr->statistics.exec_max));
+
+	curr->sum_exec_runtime += delta_exec;
+	schedstat_add(cfs_rq, exec_clock, delta_exec);
+
+	curr->vruntime += calc_delta_fair(delta_exec, curr);
+	update_min_vruntime(cfs_rq);
+
 	if (entity_is_task(curr)) {
 		struct task_struct *curtask = task_of(curr);
 
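One subtlety in the reworked update_curr() above: the old "if (!delta_exec)" test only caught a zero delta, but now - curr->exec_start is an unsigned subtraction, so a task clock that reads slightly behind exec_start wraps to a huge u64. The new "(s64)delta_exec <= 0" reinterprets that wrap as a negative value and discards it. A standalone illustration (hypothetical clock values):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t exec_start = 1000, now = 990;	/* clock read backwards */
	uint64_t delta = now - exec_start;	/* wraps to 0xff...fff6 */

	assert(delta != 0);		/* the old "!delta_exec" test misses it */
	assert((int64_t)delta <= 0);	/* the new check rejects it */
	return 0;
}
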
@@ -3015,8 +3001,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	}
 }
 
-static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				     unsigned long delta_exec)
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
 	/* dock delta_exec before expiring quota (as it could span periods) */
 	cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3019,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 }
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 		return;
@@ -3574,8 +3559,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 	return rq_clock_task(rq_of(cfs_rq));
 }
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				   unsigned long delta_exec) {}
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index dc4de3762111..bcf1d2f0b791 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,9 +18,9 @@
18#include "helpers/bitmask.h" 18#include "helpers/bitmask.h"
19 19
20static struct option set_opts[] = { 20static struct option set_opts[] = {
21 { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, 21 { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
22 { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, 22 { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
23 { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, 23 { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
24 { }, 24 { },
25}; 25};
26 26
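
The option-table change is easy to misread as cosmetic, but getopt_long() treats the two argument kinds differently: with optional_argument a value is only consumed when attached (--perf-bias=15 or -b15), so the common separated form --perf-bias 15 left optarg NULL; required_argument accepts both spellings. A minimal standalone demo of the repaired behaviour (hypothetical program, not the real tool):

#include <getopt.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	static struct option opts[] = {
		/* with optional_argument (and "b::" below), the separated
		 * form "--perf-bias 15" would leave optarg NULL here */
		{ "perf-bias", required_argument, NULL, 'b' },
		{ 0 },
	};
	int c;

	while ((c = getopt_long(argc, argv, "b:", opts, NULL)) != -1)
		if (c == 'b')
			printf("perf-bias = %s\n", optarg);
	return 0;
}

Invoked as "./demo --perf-bias 15", this now prints the value instead of silently receiving no argument.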