author		David S. Miller <davem@davemloft.net>	2019-04-05 17:14:19 -0400
committer	David S. Miller <davem@davemloft.net>	2019-04-05 17:14:19 -0400
commit		f83f7151950dd9e0f6b4a1a405bf5e55c5294e4d (patch)
tree		f8d9d8ee821fcc9f0a8e1a8679bc622219c70e3b /kernel
parent		8f4043f1253292495dbf9c8be0c1b07b4b9902b7 (diff)
parent		7f46774c6480174eb869a3c15167eafac467a6af (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor comment merge conflict in mlx5. Staging driver has a fixup due to the skb->xmit_more changes in 'net-next', but that driver was removed in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/cpumap.c	13
-rw-r--r--	kernel/bpf/inode.c	32
-rw-r--r--	kernel/bpf/verifier.c	5
-rw-r--r--	kernel/cpu.c	20
-rw-r--r--	kernel/ptrace.c	15
-rw-r--r--	kernel/signal.c	13
-rw-r--r--	kernel/watchdog.c	6
7 files changed, 70 insertions(+), 34 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8974b3755670..3c18260403dd 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
 					 struct xdp_frame *xdpf)
 {
+	unsigned int hard_start_headroom;
 	unsigned int frame_size;
 	void *pkt_data_start;
 	struct sk_buff *skb;
 
+	/* Part of headroom was reserved to xdpf */
+	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
+
 	/* build_skb need to place skb_shared_info after SKB end, and
 	 * also want to know the memory "truesize". Thus, need to
 	 * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
 	 * is not at a fixed memory location, with mixed length
 	 * packets, which is bad for cache-line hotness.
 	 */
-	frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
+	frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	pkt_data_start = xdpf->data - xdpf->headroom;
+	pkt_data_start = xdpf->data - hard_start_headroom;
 	skb = build_skb(pkt_data_start, frame_size);
 	if (!skb)
 		return NULL;
 
-	skb_reserve(skb, xdpf->headroom);
+	skb_reserve(skb, hard_start_headroom);
 	__skb_put(skb, xdpf->len);
 	if (xdpf->metasize)
 		skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
 	 * - RX ring dev queue index	(skb_record_rx_queue)
 	 */
 
+	/* Allow SKB to reuse area used by xdp_frame */
+	xdp_scrub_frame(xdpf);
+
 	return skb;
 }
 
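For illustration, a minimal userspace sketch of the size arithmetic this hunk corrects: the headroom handed to build_skb() and skb_reserve() must now include the xdp_frame metadata still sitting at the start of the buffer. The cache-line size and the two struct sizes below are assumed stand-ins for this sketch, not the kernel's actual per-arch values.

#include <stdio.h>

/* Assumed stand-ins; the real values are SMP_CACHE_BYTES,
 * sizeof(struct xdp_frame) and sizeof(struct skb_shared_info). */
#define CACHE_LINE		64
#define SKB_DATA_ALIGN(x)	(((x) + (CACHE_LINE - 1)) & ~(CACHE_LINE - 1))
#define XDP_FRAME_SZ		40
#define SHARED_INFO_SZ		320

int main(void)
{
	unsigned int headroom = 256;	/* xdpf->headroom */
	unsigned int len = 1000;	/* xdpf->len */

	/* Old computation: ignores the xdp_frame stored at the buffer start. */
	unsigned int old_size = SKB_DATA_ALIGN(len + headroom) +
				SKB_DATA_ALIGN(SHARED_INFO_SZ);

	/* New computation: measure headroom from the hard start of the
	 * buffer, i.e. include the space occupied by struct xdp_frame. */
	unsigned int hard_start_headroom = XDP_FRAME_SZ + headroom;
	unsigned int new_size = SKB_DATA_ALIGN(len + hard_start_headroom) +
				SKB_DATA_ALIGN(SHARED_INFO_SZ);

	printf("frame_size: old=%u new=%u\n", old_size, new_size);
	return 0;
}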
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 2ada5e21dfa6..4a8f390a2b82 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
 }
 EXPORT_SYMBOL(bpf_prog_get_type_path);
 
-static void bpf_evict_inode(struct inode *inode)
-{
-	enum bpf_type type;
-
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
-
-	if (S_ISLNK(inode->i_mode))
-		kfree(inode->i_link);
-	if (!bpf_inode_type(inode, &type))
-		bpf_any_put(inode->i_private, type);
-}
-
 /*
  * Display the mount options in /proc/mounts.
  */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
 	return 0;
 }
 
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	enum bpf_type type;
+
+	if (S_ISLNK(inode->i_mode))
+		kfree(inode->i_link);
+	if (!bpf_inode_type(inode, &type))
+		bpf_any_put(inode->i_private, type);
+	free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
 static const struct super_operations bpf_super_ops = {
 	.statfs		= simple_statfs,
 	.drop_inode	= generic_delete_inode,
 	.show_options	= bpf_show_options,
-	.evict_inode	= bpf_evict_inode,
+	.destroy_inode	= bpf_destroy_inode,
 };
 
 enum {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2fe89138309a..b7ad8003c4e6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1898,8 +1898,9 @@ continue_func:
 	}
 	frame++;
 	if (frame >= MAX_CALL_FRAMES) {
-		WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
-		return -EFAULT;
+		verbose(env, "the call stack of %d frames is too deep !\n",
+			frame);
+		return -E2BIG;
 	}
 	goto process_func;
 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 025f419d16f6..6754f3ecfd94 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -564,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	/*
+	 * When CPU hotplug is disabled, then taking the CPU down is not
+	 * possible because takedown_cpu() and the architecture and
+	 * subsystem specific mechanisms are not available. So the CPU
+	 * which would be completely unplugged again needs to stay around
+	 * in the current state.
+	 */
+	return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 			      enum cpuhp_state target)
 {
@@ -574,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 		st->state++;
 		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 		if (ret) {
-			st->target = prev_state;
-			undo_cpu_up(cpu, st);
+			if (can_rollback_cpu(st)) {
+				st->target = prev_state;
+				undo_cpu_up(cpu, st);
+			}
 			break;
 		}
 	}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 771e93f9c43f..6f357f4fc859 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/sched/signal.h>
 
 /*
  * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
 		ret = ptrace_setsiginfo(child, &siginfo);
 		break;
 
-	case PTRACE_GETSIGMASK:
+	case PTRACE_GETSIGMASK: {
+		sigset_t *mask;
+
 		if (addr != sizeof(sigset_t)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+		if (test_tsk_restore_sigmask(child))
+			mask = &child->saved_sigmask;
+		else
+			mask = &child->blocked;
+
+		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
 			ret = -EFAULT;
 		else
 			ret = 0;
 
 		break;
+	}
 
 	case PTRACE_SETSIGMASK: {
 		sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
 		child->blocked = new_set;
 		spin_unlock_irq(&child->sighand->siglock);
 
+		clear_tsk_restore_sigmask(child);
+
 		ret = 0;
 		break;
 	}
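A minimal userspace sketch of the request this hunk changes, assuming a traced, stopped child (PTRACE_GETSIGMASK has existed since Linux 3.11; note the addr argument is the kernel's 8-byte sigset size, not glibc's much larger sigset_t):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_GETSIGMASK
#define PTRACE_GETSIGMASK 0x420a
#endif

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		sigset_t block;

		sigemptyset(&block);
		sigaddset(&block, SIGUSR1);
		sigprocmask(SIG_BLOCK, &block, NULL);
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the tracer inspect us */
		_exit(0);
	}

	waitpid(pid, NULL, 0);			/* tracee is now stopped */

	unsigned long long kmask = 0;
	/* addr = kernel sigset size (8 bytes on common arches) */
	if (ptrace(PTRACE_GETSIGMASK, pid, (void *)8, &kmask) == 0)
		printf("SIGUSR1 blocked in tracee: %d\n",
		       !!(kmask & (1ULL << (SIGUSR1 - 1))));
	else
		perror("PTRACE_GETSIGMASK");

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}

With the change above, a tracee sitting in sigsuspend()/ppoll() reports the mask it will actually run with once restored, rather than the temporary one.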
diff --git a/kernel/signal.c b/kernel/signal.c
index b7953934aa99..f98448cf2def 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
 		if (unlikely(sig != kinfo.si_signo))
 			goto err;
 
+		/* Only allow sending arbitrary signals to yourself. */
+		ret = -EPERM;
 		if ((task_pid(current) != pid) &&
-		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) {
-			/* Only allow sending arbitrary signals to yourself. */
-			ret = -EPERM;
-			if (kinfo.si_code != SI_USER)
-				goto err;
-
-			/* Turn this into a regular kill signal. */
-			prepare_kill_siginfo(sig, &kinfo);
-		}
+		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+			goto err;
 	} else {
 		prepare_kill_siginfo(sig, &kinfo);
 	}
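And a rough userspace sketch of what the tightened pidfd_send_signal() check means (a sketch, assuming x86_64 where the syscall number is 424, and using the /proc/<pid> directory fd that this syscall accepted when introduced): a siginfo with crafted si_code/si_pid fields aimed at a task other than the caller now comes straight back as EPERM instead of being rewritten into a plain kill.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424	/* x86_64 */
#endif

int main(int argc, char **argv)
{
	const char *pid = argc > 1 ? argv[1] : "1";	/* some other task */
	char path[64];
	siginfo_t info;
	int pidfd;

	snprintf(path, sizeof(path), "/proc/%s", pid);
	pidfd = open(path, O_DIRECTORY | O_CLOEXEC);
	if (pidfd < 0) {
		perror("open");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGUSR1;
	info.si_code = SI_USER;		/* crafted "sent by user" info */
	info.si_pid = 12345;		/* spoofed sender pid */

	if (syscall(__NR_pidfd_send_signal, pidfd, SIGUSR1, &info, 0) < 0)
		perror("pidfd_send_signal");	/* expected: EPERM */
	return 0;
}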
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 403c9bd90413..6a5787233113 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
 
 int lockup_detector_online_cpu(unsigned int cpu)
 {
-	watchdog_enable(cpu);
+	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+		watchdog_enable(cpu);
 	return 0;
 }
 
 int lockup_detector_offline_cpu(unsigned int cpu)
 {
-	watchdog_disable(cpu);
+	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+		watchdog_disable(cpu);
 	return 0;
 }
 