Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c                |  4
-rw-r--r--  kernel/bpf/syscall.c             | 35
-rw-r--r--  kernel/debug/kdb/kdb_bt.c        |  4
-rw-r--r--  kernel/debug/kdb/kdb_io.c        | 15
-rw-r--r--  kernel/debug/kdb/kdb_keyboard.c  |  4
-rw-r--r--  kernel/debug/kdb/kdb_main.c      | 35
-rw-r--r--  kernel/debug/kdb/kdb_private.h   |  2
-rw-r--r--  kernel/debug/kdb/kdb_support.c   | 28
-rw-r--r--  kernel/resource.c                | 19
-rw-r--r--  kernel/sched/core.c              |  5
-rw-r--r--  kernel/sched/fair.c              | 66
-rw-r--r--  kernel/sched/psi.c               | 43
-rw-r--r--  kernel/time/posix-cpu-timers.c   |  3
-rw-r--r--  kernel/trace/trace_probe.c       |  2
-rw-r--r--  kernel/user_namespace.c          | 12
15 files changed, 163 insertions(+), 114 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6377225b2082..1a796e0799ec 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 		    char *sym)
 {
-	unsigned long symbol_start, symbol_end;
 	struct bpf_prog_aux *aux;
 	unsigned int it = 0;
 	int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 		if (it++ != symnum)
 			continue;
 
-		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 		bpf_get_prog_name(aux->prog, sym);
 
-		*value = symbol_start;
+		*value = (unsigned long)aux->prog->bpf_func;
 		*type  = BPF_SYM_ELF_TYPE;
 
 		ret = 0;
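
For context: the old code rounded every reported symbol address down to a page boundary, while the new code reports the JITed program's actual entry point. A minimal userspace sketch of the difference (hypothetical address, 4 KiB pages assumed, not part of the commit):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long bpf_func = 0xffffffffc0002348UL; /* hypothetical JIT addr */

	/* old behaviour: round down to the page, hiding the real entry point */
	printf("rounded:  0x%lx\n", bpf_func & PAGE_MASK);
	/* new behaviour: report the actual address of the JITed image */
	printf("reported: 0x%lx\n", bpf_func);
	return 0;
}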
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ccb93277aae2..cf5040fd5434 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 		info.jited_prog_len = 0;
 		info.xlated_prog_len = 0;
 		info.nr_jited_ksyms = 0;
+		info.nr_jited_func_lens = 0;
 		goto done;
 	}
 
@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	}
 
 	ulen = info.nr_jited_ksyms;
-	info.nr_jited_ksyms = prog->aux->func_cnt;
+	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
 	if (info.nr_jited_ksyms && ulen) {
 		if (bpf_dump_raw_ok()) {
+			unsigned long ksym_addr;
 			u64 __user *user_ksyms;
-			ulong ksym_addr;
 			u32 i;
 
 			/* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 			 */
 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
-			for (i = 0; i < ulen; i++) {
-				ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
-				ksym_addr &= PAGE_MASK;
-				if (put_user((u64) ksym_addr, &user_ksyms[i]))
+			if (prog->aux->func_cnt) {
+				for (i = 0; i < ulen; i++) {
+					ksym_addr = (unsigned long)
+						prog->aux->func[i]->bpf_func;
+					if (put_user((u64) ksym_addr,
+						     &user_ksyms[i]))
+						return -EFAULT;
+				}
+			} else {
+				ksym_addr = (unsigned long) prog->bpf_func;
+				if (put_user((u64) ksym_addr, &user_ksyms[0]))
 					return -EFAULT;
 			}
 		} else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	}
 
 	ulen = info.nr_jited_func_lens;
-	info.nr_jited_func_lens = prog->aux->func_cnt;
+	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
 	if (info.nr_jited_func_lens && ulen) {
 		if (bpf_dump_raw_ok()) {
 			u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 			/* copy the JITed image lengths for each function */
 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
 			user_lens = u64_to_user_ptr(info.jited_func_lens);
-			for (i = 0; i < ulen; i++) {
-				func_len = prog->aux->func[i]->jited_len;
-				if (put_user(func_len, &user_lens[i]))
+			if (prog->aux->func_cnt) {
+				for (i = 0; i < ulen; i++) {
+					func_len =
+						prog->aux->func[i]->jited_len;
+					if (put_user(func_len, &user_lens[i]))
+						return -EFAULT;
+				}
+			} else {
+				func_len = prog->jited_len;
+				if (put_user(func_len, &user_lens[0]))
 					return -EFAULT;
 			}
 		} else {
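
For context: "func_cnt ? : 1" is the GNU C conditional with an omitted middle operand, so a program without subprograms still reports exactly one ksym and one length entry. A small userspace sketch of the operator (GCC/Clang extension):

#include <stdio.h>

int main(void)
{
	unsigned int func_cnt = 0;

	/* a program with no subprograms still exposes one entry */
	printf("%u\n", func_cnt ?: 1);	/* prints 1 */
	func_cnt = 4;
	printf("%u\n", func_cnt ?: 1);	/* prints 4 */
	return 0;
}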
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 6ad4a9fcbd6f..7921ae4fca8d 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
 		kdb_printf("no process for cpu %ld\n", cpu);
 		return 0;
 	}
-	sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+	sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
 	kdb_parse(buf);
 	return 0;
 }
 kdb_printf("btc: cpu status: ");
 kdb_parse("cpu\n");
 for_each_online_cpu(cpu) {
-	sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+	sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
 	kdb_parse(buf);
 	touch_nmi_watchdog();
 }
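
For context on the %p to %px conversions throughout this series: since the kernel started hashing %p output (commit ad67b74d2469, "printk: hash addresses printed with %p"), any kdb output that must carry a real, dereferenceable pointer has to use %px explicitly. An illustrative kernel-style snippet, not part of the commit:

#include <linux/printk.h>

struct task_struct;

static void show_task_pointers(struct task_struct *tsk)
{
	pr_info("hashed:   %p\n", tsk);		/* e.g. 00000000a3b1c2d4 */
	pr_info("verbatim: %px\n", tsk);	/* e.g. ffff8881234a5000 */
}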
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index ed5d34925ad0..6a4b41484afe 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
 	int count;
 	int i;
 	int diag, dtab_count;
-	int key;
+	int key, buf_size, ret;
 
 
 	diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -336,9 +336,8 @@ poll_again:
 			else
 				p_tmp = tmpbuffer;
 			len = strlen(p_tmp);
-			count = kallsyms_symbol_complete(p_tmp,
-							 sizeof(tmpbuffer) -
-							 (p_tmp - tmpbuffer));
+			buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
+			count = kallsyms_symbol_complete(p_tmp, buf_size);
 			if (tab == 2 && count > 0) {
 				kdb_printf("\n%d symbols are found.", count);
 				if (count > dtab_count) {
@@ -350,9 +349,13 @@ poll_again:
 				}
 				kdb_printf("\n");
 				for (i = 0; i < count; i++) {
-					if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
+					ret = kallsyms_symbol_next(p_tmp, i, buf_size);
+					if (WARN_ON(!ret))
 						break;
-					kdb_printf("%s ", p_tmp);
+					if (ret != -E2BIG)
+						kdb_printf("%s ", p_tmp);
+					else
+						kdb_printf("%s... ", p_tmp);
 					*(p_tmp + len) = '\0';
 				}
 				if (i >= dtab_count)
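
For context: the new buf_size/ret plumbing leans on strscpy() semantics, where the return value is the number of bytes copied, or -E2BIG when the source had to be truncated; the "%s... " branch marks truncated completions. A userspace approximation of that contract (helper name is hypothetical):

#include <stdio.h>
#include <string.h>
#include <errno.h>

static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size == 0)
		return -E2BIG;
	if (len >= size) {		/* will not fit: truncate and flag it */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];
	long ret = strscpy_like(buf, "kallsyms_symbol_next", sizeof(buf));

	printf("%s%s\n", buf, ret == -E2BIG ? "..." : "");	/* kallsym... */
	return 0;
}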
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
index 118527aa60ea..750497b0003a 100644
--- a/kernel/debug/kdb/kdb_keyboard.c
+++ b/kernel/debug/kdb/kdb_keyboard.c
@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
 	case KT_LATIN:
 		if (isprint(keychar))
 			break;		/* printable characters */
-		/* drop through */
+		/* fall through */
 	case KT_SPEC:
 		if (keychar == K_ENTER)
 			break;
-		/* drop through */
+		/* fall through */
 	default:
 		return -1;	/* ignore unprintables */
 	}
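
For context: the exact comment wording matters here, since GCC's -Wimplicit-fallthrough only treats recognized patterns such as "fall through" as an intentional fall-through marker; "drop through" is not matched and would still warn. A small userspace sketch:

#include <stdio.h>

static int weight(int c)
{
	int n = 0;

	switch (c) {
	case ' ':
		n++;
		/* fall through */
	case '\t':
		n++;
		break;
	default:
		break;
	}
	return n;
}

int main(void)
{
	printf("%d %d\n", weight(' '), weight('\t'));	/* 2 1 */
	return 0;
}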
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index bb4fe4e1a601..d72b32c66f7d 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
 	if (reason == KDB_REASON_DEBUG) {
 		/* special case below */
 	} else {
-		kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+		kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
 			   kdb_current, kdb_current ? kdb_current->pid : 0);
 #if defined(CONFIG_SMP)
 		kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
 	 */
 	switch (db_result) {
 	case KDB_DB_BPT:
-		kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+		kdb_printf("\nEntering kdb (0x%px, pid %d) ",
 			   kdb_current, kdb_current->pid);
 #if defined(CONFIG_SMP)
 		kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1493,6 +1493,7 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
 	char cbuf[32];
 	char *c = cbuf;
 	int i;
+	int j;
 	unsigned long word;
 
 	memset(cbuf, '\0', sizeof(cbuf));
@@ -1538,25 +1539,9 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
 			wc.word = word;
 #define printable_char(c) \
 	({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
-			switch (bytesperword) {
-			case 8:
+			for (j = 0; j < bytesperword; j++)
 				*c++ = printable_char(*cp++);
-				*c++ = printable_char(*cp++);
-				*c++ = printable_char(*cp++);
-				*c++ = printable_char(*cp++);
-				addr += 4;
-			case 4:
-				*c++ = printable_char(*cp++);
-				*c++ = printable_char(*cp++);
-				addr += 2;
-			case 2:
-				*c++ = printable_char(*cp++);
-				addr++;
-			case 1:
-				*c++ = printable_char(*cp++);
-				addr++;
-				break;
-			}
+			addr += bytesperword;
 #undef printable_char
 		}
 	}
@@ -2048,7 +2033,7 @@ static int kdb_lsmod(int argc, const char **argv)
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
 
-		kdb_printf("%-20s%8u  0x%p ", mod->name,
+		kdb_printf("%-20s%8u  0x%px ", mod->name,
 			   mod->core_layout.size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
 		kdb_printf("%4d ", module_refcount(mod));
@@ -2059,7 +2044,7 @@ static int kdb_lsmod(int argc, const char **argv)
 			kdb_printf(" (Loading)");
 		else
 			kdb_printf(" (Live)");
-		kdb_printf(" 0x%p", mod->core_layout.base);
+		kdb_printf(" 0x%px", mod->core_layout.base);
 
 #ifdef CONFIG_MODULE_UNLOAD
 		{
@@ -2341,7 +2326,7 @@ void kdb_ps1(const struct task_struct *p)
 		return;
 
 	cpu = kdb_process_cpu(p);
-	kdb_printf("0x%p %8d %8d  %d %4d   %c  0x%p %c%s\n",
+	kdb_printf("0x%px %8d %8d  %d %4d   %c  0x%px %c%s\n",
 		   (void *)p, p->pid, p->parent->pid,
 		   kdb_task_has_cpu(p), kdb_process_cpu(p),
 		   kdb_task_state_char(p),
@@ -2354,7 +2339,7 @@ void kdb_ps1(const struct task_struct *p)
 	} else {
 		if (KDB_TSK(cpu) != p)
 			kdb_printf("  Error: does not match running "
-				   "process table (0x%p)\n", KDB_TSK(cpu));
+				   "process table (0x%px)\n", KDB_TSK(cpu));
 		}
 	}
 }
@@ -2687,7 +2672,7 @@ int kdb_register_flags(char *cmd,
 	for_each_kdbcmd(kp, i) {
 		if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
 			kdb_printf("Duplicate kdb command registered: "
-				"%s, func %p help %s\n", cmd, func, help);
+				"%s, func %px help %s\n", cmd, func, help);
 			return 1;
 		}
 	}
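
For context: the removed switch in kdb_md_line() relied on case fall-through to copy exactly 8, 4, 2, or 1 bytes, and the loop is equivalent while also advancing addr by bytesperword in one step. A userspace model of the new loop (uses the GNU C statement expression, as the original macro does):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define printable_char(c) \
	({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })

int main(void)
{
	const unsigned char word[8] = { 'k', 'd', 'b', 0x01, '!', 0xff, 'x', 0 };
	char cbuf[32];
	int bytesperword = 8, j;
	const unsigned char *cp = word;
	char *c = cbuf;

	memset(cbuf, '\0', sizeof(cbuf));
	for (j = 0; j < bytesperword; j++)
		*c++ = printable_char(*cp++);
	printf("%s\n", cbuf);	/* prints "kdb.!.x." */
	return 0;
}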
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 1e5a502ba4a7..2118d8258b7c 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -83,7 +83,7 @@ typedef struct __ksymtab {
 	unsigned long sym_start;
 	unsigned long sym_end;
 } kdb_symtab_t;
-extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
 extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
 
 /* Exported Symbols for kernel loadable modules to use. */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 990b3cc526c8..50bf9b119bad 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -40,7 +40,7 @@
 int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
 {
 	if (KDB_DEBUG(AR))
-		kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+		kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
 			   symtab);
 	memset(symtab, 0, sizeof(*symtab));
 	symtab->sym_start = kallsyms_lookup_name(symname);
@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 	char *knt1 = NULL;
 
 	if (KDB_DEBUG(AR))
-		kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+		kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
 	memset(symtab, 0, sizeof(*symtab));
 
 	if (addr < 4096)
@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 		symtab->mod_name = "kernel";
 	if (KDB_DEBUG(AR))
 		kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
-		   "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+		   "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
 		   symtab->sym_start, symtab->mod_name, symtab->sym_name,
 		   symtab->sym_name);
 
@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
  * Parameters:
  *	prefix_name	prefix of a symbol name to lookup
  *	flag	0 means search from the head, 1 means continue search.
+ *	buf_size	maximum length that can be written to prefix_name
+ *			buffer
  * Returns:
  *	1 if a symbol matches the given prefix.
  *	0 if no string found
  */
-int kallsyms_symbol_next(char *prefix_name, int flag)
+int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
 {
 	int prefix_len = strlen(prefix_name);
 	static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
 		pos = 0;
 
 	while ((name = kdb_walk_kallsyms(&pos))) {
-		if (strncmp(name, prefix_name, prefix_len) == 0) {
-			strncpy(prefix_name, name, strlen(name)+1);
-			return 1;
-		}
+		if (!strncmp(name, prefix_name, prefix_len))
+			return strscpy(prefix_name, name, buf_size);
 	}
 	return 0;
 }
@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
 			*word = w8;
 			break;
 		}
-		/* drop through */
+		/* fall through */
 	default:
 		diag = KDB_BADWIDTH;
 		kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
 			*word = w8;
 			break;
 		}
-		/* drop through */
+		/* fall through */
 	default:
 		diag = KDB_BADWIDTH;
 		kdb_printf("kdb_getword: bad width %ld\n", (long) size);
@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
 			diag = kdb_putarea(addr, w8);
 			break;
 		}
-		/* drop through */
+		/* fall through */
 	default:
 		diag = KDB_BADWIDTH;
 		kdb_printf("kdb_putword: bad width %ld\n", (long) size);
@@ -887,13 +887,13 @@ void debug_kusage(void)
 		   __func__, dah_first);
 	if (dah_first) {
 		h_used = (struct debug_alloc_header *)debug_alloc_pool;
-		kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+		kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
 			   h_used->size);
 	}
 	do {
 		h_used = (struct debug_alloc_header *)
 			((char *)h_free + dah_overhead + h_free->size);
-		kdb_printf("%s: h_used %p size %d caller %p\n",
+		kdb_printf("%s: h_used %px size %d caller %px\n",
 			   __func__, h_used, h_used->size, h_used->caller);
 		h_free = (struct debug_alloc_header *)
 			(debug_alloc_pool + h_free->next);
@@ -902,7 +902,7 @@ void debug_kusage(void)
 		((char *)h_free + dah_overhead + h_free->size);
 	if ((char *)h_used - debug_alloc_pool !=
 	    sizeof(debug_alloc_pool_aligned))
-		kdb_printf("%s: h_used %p size %d caller %p\n",
+		kdb_printf("%s: h_used %px size %d caller %px\n",
 			   __func__, h_used, h_used->size, h_used->caller);
 out:
 	spin_unlock(&dap_lock);
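
For context: the replaced strncpy() call was bounded by the SOURCE length, strlen(name) + 1, which makes it behave like an unbounded strcpy() and lets a long symbol name overrun prefix_name; strscpy() bounds the copy by the destination size passed in as buf_size. An illustrative contrast (hypothetical helper names, plain userspace C):

#include <string.h>

void bad(char *dst, const char *src)
{
	/* "bounded" by the source: overflows dst if src is longer */
	strncpy(dst, src, strlen(src) + 1);
}

void good(char *dst, size_t dst_size, const char *src)
{
	/* bounded by the destination; the kernel code uses strscpy() here */
	strncpy(dst, src, dst_size - 1);
	dst[dst_size - 1] = '\0';
}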
diff --git a/kernel/resource.c b/kernel/resource.c
index b3a3a1fc499e..b0fbf685c77a 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
 EXPORT_SYMBOL(release_resource);
 
 /**
- * Finds the lowest iomem resource that covers part of [start..end].  The
- * caller must specify start, end, flags, and desc (which may be
+ * Finds the lowest iomem resource that covers part of [@start..@end].  The
+ * caller must specify @start, @end, @flags, and @desc (which may be
  * IORES_DESC_NONE).
  *
- * If a resource is found, returns 0 and *res is overwritten with the part
- * of the resource that's within [start..end]; if none is found, returns
- * -1.
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]; if none is found, returns
+ * -1 or -EINVAL for other invalid parameters.
  *
  * This function walks the whole tree and not just first level children
  * unless @first_lvl is true.
+ *
+ * @start:	start address of the resource searched for
+ * @end:	end address of same resource
+ * @flags:	flags which the resource must have
+ * @desc:	descriptor the resource must have
+ * @first_lvl:	walk only the first level children, if set
+ * @res:	return ptr, if resource found
  */
 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 			       unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
  * @flags: I/O resource flags
  * @start: start addr
  * @end: end addr
+ * @arg: function argument for the callback @func
+ * @func: callback function that is called for each qualifying resource area
  *
  * NOTE: For a new descriptor search, define a new IORES_DESC in
  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
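
For context: the hunk above adds the @-parameter lines kernel-doc expects. For reference, a fully conforming kernel-doc block for this function might look like the following sketch (not the committed comment, which keeps its original prose):

/**
 * find_next_iomem_res - find the lowest iomem resource covering [start..end]
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @first_lvl: walk only the first level children, if set
 * @res: return ptr, if resource found
 *
 * Return: 0 on success; -1 if no matching resource exists, or -EINVAL for
 * other invalid parameters.
 */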
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70..091e089063be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
-	 * happen.
+	 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+	 * but there won't be any contention on it.
 	 */
+	cpus_read_lock();
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);
 	mutex_unlock(&sched_domains_mutex);
+	cpus_read_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
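
For context: as the new comment says, the motivation is lockdep rather than a real race; code reached from a domain rebuild can assert the hotplug lock via lockdep_assert_cpus_held(), so even a provably race-free caller must take it. A kernel-style sketch of the pattern (illustrative names, and the downstream assert is an assumption here):

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_domains_mutex);

static void rebuild_domains_locked(void)
{
	lockdep_assert_cpus_held();	/* what a callee may check */
	/* ... build domains from cpu_active_mask ... */
}

static void rebuild_domains(void)
{
	cpus_read_lock();	/* uncontended at init, but keeps lockdep happy */
	mutex_lock(&example_domains_mutex);
	rebuild_domains_locked();
	mutex_unlock(&example_domains_mutex);
	cpus_read_unlock();
}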
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb661cc..ac855b2f4774 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 		local = 1;
 
 	/*
-	 * Retry task to preferred node migration periodically, in case it
-	 * case it previously failed, or the scheduler moved us.
+	 * Retry to migrate task to preferred node periodically, in case it
+	 * previously failed, or the scheduler moved us.
 	 */
 	if (time_after(jiffies, p->numa_migrate_retry)) {
 		task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	return target;
 }
 
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
+static unsigned long cpu_util_without(int cpu, struct task_struct *p);
 
-static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
 {
-	return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
+	return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
 }
 
 /*
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
 
-			spare_cap = capacity_spare_wake(i, p);
+			spare_cap = capacity_spare_without(i, p);
 
 			if (spare_cap > max_spare_cap)
 				max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 		return prev_cpu;
 
 	/*
-	 * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
-	 * last_update_time.
+	 * We need task's util for capacity_spare_without, sync it up to
+	 * prev_cpu's last_update_time.
 	 */
 	if (!(sd_flag & SD_BALANCE_FORK))
 		sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
 }
 
 /*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
+ * cpu_util_without: compute cpu utilization without any contributions from *p
+ * @cpu: the CPU which utilization is requested
+ * @p: the task which utilization should be discounted
+ *
+ * The utilization of a CPU is defined by the utilization of tasks currently
+ * enqueued on that CPU as well as tasks which are currently sleeping after an
+ * execution on that CPU.
+ *
+ * This method returns the utilization of the specified CPU by discounting the
+ * utilization of the specified task, whenever the task is currently
+ * contributing to the CPU utilization.
  */
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq;
 	unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 	cfs_rq = &cpu_rq(cpu)->cfs;
 	util = READ_ONCE(cfs_rq->avg.util_avg);
 
-	/* Discount task's blocked util from CPU's util */
+	/* Discount task's util from CPU's util */
 	util -= min_t(unsigned int, util, task_util(p));
 
 	/*
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 	 * a) if *p is the only task sleeping on this CPU, then:
 	 *      cpu_util (== task_util) > util_est (== 0)
 	 *    and thus we return:
-	 *      cpu_util_wake = (cpu_util - task_util) = 0
+	 *      cpu_util_without = (cpu_util - task_util) = 0
 	 *
 	 * b) if other tasks are SLEEPING on this CPU, which is now exiting
 	 *    IDLE, then:
 	 *      cpu_util >= task_util
 	 *      cpu_util > util_est (== 0)
 	 *    and thus we discount *p's blocked utilization to return:
-	 *      cpu_util_wake = (cpu_util - task_util) >= 0
+	 *      cpu_util_without = (cpu_util - task_util) >= 0
 	 *
 	 * c) if other tasks are RUNNABLE on that CPU and
 	 *      util_est > cpu_util
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 	 * covered by the following code when estimated utilization is
 	 * enabled.
 	 */
-	if (sched_feat(UTIL_EST))
-		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+	if (sched_feat(UTIL_EST)) {
+		unsigned int estimated =
+			READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+		/*
+		 * Despite the following checks we still have a small window
+		 * for a possible race, when an execl's select_task_rq_fair()
+		 * races with LB's detach_task():
+		 *
+		 *   detach_task()
+		 *     p->on_rq = TASK_ON_RQ_MIGRATING;
+		 *     ---------------------------------- A
+		 *     deactivate_task() \
+		 *       dequeue_task()   + RaceTime
+		 *         util_est_dequeue() /
+		 *     ---------------------------------- B
+		 *
+		 * The additional check on "current == p" is required to
+		 * properly fix the execl regression and it helps in further
+		 * reducing the chances for the above race.
+		 */
+		if (unlikely(task_on_rq_queued(p) || current == p)) {
+			estimated -= min_t(unsigned int, estimated,
+					   (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+		}
+		util = max(util, estimated);
+	}
 
 	/*
 	 * Utilization (estimated) can exceed the CPU capacity, thus let's
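
For context: a worked example of the cpu_util_without() arithmetic, modeled in plain C with made-up numbers. With util_avg 300, util_est.enqueued 350, and the waking task contributing 200 while it is current:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int util = 300;	/* cfs_rq->avg.util_avg */
	unsigned int estimated = 350;	/* cfs_rq->avg.util_est.enqueued */
	unsigned int task_util = 200;	/* contribution of *p */
	int p_is_current = 1;		/* task_on_rq_queued(p) || current == p */

	util -= min_u(util, task_util);			/* 300 - 200 = 100 */
	if (p_is_current)
		estimated -= min_u(estimated, task_util);	/* 350 - 200 = 150 */
	if (estimated > util)
		util = estimated;			/* max() picks 150 */
	printf("cpu_util_without = %u\n", util);
	return 0;
}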
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7cdecfc010af..3d7355d7c3e3 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -633,38 +633,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
  */
 void cgroup_move_task(struct task_struct *task, struct css_set *to)
 {
-	bool move_psi = !psi_disabled;
 	unsigned int task_flags = 0;
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (move_psi) {
-		rq = task_rq_lock(task, &rf);
+	if (psi_disabled) {
+		/*
+		 * Lame to do this here, but the scheduler cannot be locked
+		 * from the outside, so we move cgroups from inside sched/.
+		 */
+		rcu_assign_pointer(task->cgroups, to);
+		return;
+	}
 
-		if (task_on_rq_queued(task))
-			task_flags = TSK_RUNNING;
-		else if (task->in_iowait)
-			task_flags = TSK_IOWAIT;
+	rq = task_rq_lock(task, &rf);
 
-		if (task->flags & PF_MEMSTALL)
-			task_flags |= TSK_MEMSTALL;
+	if (task_on_rq_queued(task))
+		task_flags = TSK_RUNNING;
+	else if (task->in_iowait)
+		task_flags = TSK_IOWAIT;
 
-		if (task_flags)
-			psi_task_change(task, task_flags, 0);
-	}
+	if (task->flags & PF_MEMSTALL)
+		task_flags |= TSK_MEMSTALL;
 
-	/*
-	 * Lame to do this here, but the scheduler cannot be locked
-	 * from the outside, so we move cgroups from inside sched/.
-	 */
-	rcu_assign_pointer(task->cgroups, to);
+	if (task_flags)
+		psi_task_change(task, task_flags, 0);
+
+	/* See comment above */
+	rcu_assign_pointer(task->cgroups, to);
 
-	if (move_psi) {
-		if (task_flags)
-			psi_task_change(task, 0, task_flags);
+	if (task_flags)
+		psi_task_change(task, 0, task_flags);
 
-		task_rq_unlock(rq, task, &rf);
-	}
+	task_rq_unlock(rq, task, &rf);
 }
 #endif /* CONFIG_CGROUPS */
 
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ce32cf741b25..8f0644af40be 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
 	struct task_cputime cputime;
 	unsigned long soft;
 
-	if (dl_task(tsk))
-		check_dl_overrun(tsk);
-
 	/*
 	 * If cputimer is not running, then there are no active
 	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 3ef15a6683c0..bd30e9398d2a 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
 			if (code[1].op != FETCH_OP_IMM)
 				return -EINVAL;
 
-			tmp = strpbrk("+-", code->data);
+			tmp = strpbrk(code->data, "+-");
 			if (tmp)
 				c = *tmp;
 			ret = traceprobe_split_symbol_offset(code->data,
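
For context: this one-liner is a genuine argument-order fix. strpbrk(s, accept) scans s for the first byte present in accept, so the old call scanned the literal "+-" instead of code->data and could never locate a symbol's +offset/-offset suffix within the probe argument. A userspace check (hypothetical symbol string):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *data = "vfs_read+0x10";

	/* correct: scan data for the first '+' or '-' */
	printf("correct: %s\n", strpbrk(data, "+-"));	/* "+0x10" */

	/* swapped: scans the literal "+-"; the result never points
	 * into data, so offset parsing went wrong */
	printf("swapped: %s\n", strpbrk("+-", data));	/* "+-" */
	return 0;
}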
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index e5222b5fb4fe..923414a246e9 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
 		goto out;
 
-	ret = sort_idmaps(&new_map);
-	if (ret < 0)
-		goto out;
-
 	ret = -EPERM;
 	/* Map the lower ids from the parent user namespace to the
 	 * kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 		e->lower_first = lower_first;
 	}
 
+	/*
+	 * If we want to use binary search for lookup, this clones the extent
+	 * array and sorts both copies.
+	 */
+	ret = sort_idmaps(&new_map);
+	if (ret < 0)
+		goto out;
+
 	/* Install the map */
 	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
 		memcpy(map->extent, new_map.extent,
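
For context: the call moves because the cloned copy used for reverse (kernel id to namespace id) lookups is sorted by lower_first, so sorting is only meaningful after the loop above has rewritten each extent's lower_first into the kernel global id space; sorting earlier orders the copy on values that are then overwritten. A userspace model with simplified, hypothetical types:

#include <stdlib.h>

struct uid_gid_extent {
	unsigned int first;
	unsigned int lower_first;
	unsigned int count;
};

static int cmp_forward(const void *a, const void *b)
{
	const struct uid_gid_extent *ea = a, *eb = b;

	return ea->first < eb->first ? -1 : ea->first > eb->first;
}

static int cmp_reverse(const void *a, const void *b)
{
	const struct uid_gid_extent *ea = a, *eb = b;

	return ea->lower_first < eb->lower_first ? -1 :
	       ea->lower_first > eb->lower_first;
}

/* sort both copies only once lower_first holds the final kernel ids */
void sort_idmaps_like(struct uid_gid_extent *fwd, struct uid_gid_extent *rev,
		      size_t n)
{
	qsort(fwd, n, sizeof(*fwd), cmp_forward);
	qsort(rev, n, sizeof(*rev), cmp_reverse);
}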