Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/pgtable.h   | 30
-rw-r--r--  include/asm-generic/tlb.h       |  2
-rw-r--r--  include/linux/mlx5/device.h     | 22
-rw-r--r--  include/linux/mlx5/driver.h     |  7
-rw-r--r--  include/linux/sched.h           |  7
-rw-r--r--  include/linux/spinlock.h        | 14
-rw-r--r--  include/linux/swapops.h         |  2
-rw-r--r--  include/linux/syscalls.h        |  5
-rw-r--r--  include/net/busy_poll.h         |  7
-rw-r--r--  include/net/ip_tunnels.h        | 14
-rw-r--r--  include/net/sch_generic.h       |  9
-rw-r--r--  include/uapi/linux/pkt_sched.h  | 10
-rw-r--r--  include/uapi/linux/snmp.h       |  2
13 files changed, 87 insertions(+), 44 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b567..0807ddf97b05 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
 	return pmd;
 }
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return 0;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
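
The six stubs added above are the fallbacks for architectures without soft-dirty tracking (CONFIG_MEM_SOFT_DIRTY off): each returns its argument or 0, so generic mm code can propagate the soft-dirty bit with no #ifdef at the call sites. A minimal sketch of such a caller; the helper name is hypothetical, not actual mm code:

static pte_t make_swap_pte_sketch(swp_entry_t entry, pte_t old_pte)
{
	pte_t pte = swp_entry_to_pte(entry);

	/* On architectures without soft-dirty support these calls
	 * compile down to no-ops, thanks to the stubs above. */
	if (pte_soft_dirty(old_pte))
		pte = pte_swp_mksoft_dirty(pte);
	return pte;
}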
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a41..5672d7ea1fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 		    unsigned long end);
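
tlb_gather_mmu() now takes the virtual-address range being torn down instead of a bare fullmm flag, so TLB flushes can be sized to the region. A hedged sketch of an updated caller (illustrative name; passing (0, -1UL) marks a full-mm teardown):

static void unmap_range_sketch(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* was: (&tlb, mm, fullmm) */
	/* ... walk page tables, feeding freed pages into the gather ... */
	tlb_finish_mmu(&tlb, start, end);
}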
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
 	__be16	max_desc_sz_rq;
 	u8	rsvd21[2];
 	__be16	max_desc_sz_sq_dc;
-	u8	rsvd22[4];
-	__be16	max_qp_mcg;
-	u8	rsvd23;
+	__be32	max_qp_mcg;
+	u8	rsvd22[3];
 	u8	log_max_mcg;
-	u8	rsvd24;
+	u8	rsvd23;
 	u8	log_max_pd;
-	u8	rsvd25;
+	u8	rsvd24;
 	u8	log_max_xrcd;
-	u8	rsvd26[42];
+	u8	rsvd25[42];
 	__be16	log_uar_page_sz;
-	u8	rsvd27[28];
+	u8	rsvd26[28];
 	u8	log_msx_atomic_size_qp;
-	u8	rsvd28[2];
+	u8	rsvd27[2];
 	u8	log_msx_atomic_size_dc;
-	u8	rsvd29[76];
+	u8	rsvd28[76];
 };
 
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
 	u8		rsvd0[2];
 	__be16		func_id;
-	u8		rsvd1[2];
-	__be16		num_pages;
-	__be32		rsvd2[5];
+	__be32		num_pages;
+	__be32		rsvd1[5];
 };
 
 union ev_data {
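
num_pages grows from __be16 to __be32, and in host order it is treated as signed: positive values request pages for the firmware, negative values hand pages back. That is why mlx5_core_req_pages_handler() in driver.h (below) moves from s16 to s32 in the same series. A sketch of the event-handler side under those assumptions; the function name is illustrative, not the actual eq.c code:

static void page_req_event_sketch(struct mlx5_core_dev *dev,
				  struct mlx5_eqe_page_req *req)
{
	u16 func_id = be16_to_cpu(req->func_id);
	s32 npages  = be32_to_cpu(req->num_pages);	/* may be negative */

	mlx5_core_req_pages_handler(dev, func_id, npages);
}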
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b0ced1..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
 	u32	reserved_lkey;
 	u8	local_ca_ack_delay;
 	u8	log_max_mcg;
-	u16	max_qp_mcg;
+	u32	max_qp_mcg;
 	int	min_page_sz;
 };
 
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages);
+				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d722490da030..e9995eb5985c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -314,6 +314,7 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
+extern unsigned long mmap_legacy_base(void);
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
@@ -1532,6 +1533,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  * Test if a process is not yet dead (at most zombie state)
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
  */
 static inline int pid_alive(struct task_struct *p)
 {
@@ -1543,6 +1546,8 @@ static inline int pid_alive(struct task_struct *p)
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
@@ -1894,6 +1899,8 @@ extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily has to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
 /**
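
The pattern the new comment describes: a STORE issued before taking the lock must be ordered against LOADs performed inside the critical section. spin_lock() already keeps those LOADs from moving up past the lock acquisition, so a write barrier is enough on the store side. A minimal sketch of a waker-style user, with generic names rather than the actual scheduler code:

	cond = true;			/* STORE before the lock       */
	smp_mb__before_spinlock();	/* order it vs. the LOAD below */
	spin_lock(&lock);
	if (sleeper_state == SLEEPING)	/* LOAD inside the section     */
		wake_it_up();
	spin_unlock(&lock);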
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 	swp_entry_t arch_entry;
 
 	BUG_ON(pte_file(pte));
+	if (pte_swp_soft_dirty(pte))
+		pte = pte_swp_clear_soft_dirty(pte);
 	arch_entry = __pte_to_swp_entry(pte);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a293..84662ecc7b51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
 	       int __user *);
 #else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+			  int __user *, int);
+#else
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
 	       int __user *, int);
 #endif
+#endif
 
 asmlinkage long sys_execve(const char __user *filename,
 	const char __user *const __user *argv,
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f18b91966d3d..8a358a2c97e6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		if (rc > 0)
 			/* local bh are disabled so it is ok to use _BH */
 			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
 	return false;
 }
 
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
-	return false;
-}
-
 static inline void skb_mark_napi_id(struct sk_buff *skb,
 				    struct napi_struct *napi)
 {
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf86a2f..a354db5b7662 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
 	return INET_ECN_encapsulate(tos, inner);
 }
 
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
-					  const struct iphdr *old_iph,
-					  struct dst_entry *dst)
-{
-	struct iphdr *iph = ip_hdr(skb);
-
-	/* Use inner packet iph-id if possible. */
-	if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-		iph->id = old_iph->id;
-	else
-		__ip_select_ident(iph, dst,
-				  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct net *net, struct rtable *rt,
 		  struct sk_buff *skb,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab63363e59..e5ae0c50fa9c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -683,13 +683,19 @@ struct psched_ratecfg {
 	u64	rate_bytes_ps; /* bytes per second */
 	u32	mult;
 	u16	overhead;
+	u8	linklayer;
 	u8	shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
 				unsigned int len)
 {
-	return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+	len += r->overhead;
+
+	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+	return ((u64)len * r->mult) >> r->shift;
 }
 
 extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
 	memset(res, 0, sizeof(*res));
 	res->rate = r->rate_bytes_ps;
 	res->overhead = r->overhead;
+	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
 
 #endif
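
In the ATM branch every 48 bytes of payload occupy a full 53-byte cell on the wire (5 bytes of cell header), so the effective length is rounded up to whole cells before the rate math. A standalone userspace sketch of just that arithmetic, with illustrative values and the kernel's mult/shift scaling omitted:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 1500;		/* packet + overhead, bytes */
	unsigned int cells = DIV_ROUND_UP(len, 48);

	/* 1500 bytes -> 32 cells -> 1696 bytes on an ATM link */
	printf("ethernet: %u bytes, atm: %u cells = %u bytes\n",
	       len, cells, cells * 53);
	return 0;
}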
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0c7d8c..09d62b9228ff 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@ struct tc_estimator {
 #define TC_H_ROOT	(0xFFFFFFFFU)
 #define TC_H_INGRESS	(0xFFFFFFF1U)
 
+/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+	TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+	TC_LINKLAYER_ETHERNET,
+	TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
 struct tc_ratespec {
 	unsigned char	cell_log;
-	unsigned char	__reserved;
+	__u8		linklayer; /* lower 4 bits */
 	unsigned short	overhead;
 	short		cell_align;
 	unsigned short	mpu;
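
The new linklayer byte reuses what was an explicitly reserved field, so old iproute2 binaries that zero the struct leave TC_LINKLAYER_UNAWARE (0) behind, and TC_LINKLAYER_MASK keeps consumers from trusting the upper four bits, which stay reserved. A sketch of the consuming side, modeled on what psched_ratecfg_precompute() does with the value rather than quoted verbatim:

	/* conf is a struct tc_ratespec received from userspace */
	r->linklayer = conf->linklayer & TC_LINKLAYER_MASK;

	/* An old tc leaves TC_LINKLAYER_UNAWARE (0) here, which the
	 * ATM check in psched_l2t_ns() simply treats like Ethernet. */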
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674cc677..a1356d3b54df 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -253,7 +253,7 @@ enum
 	LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,	/* TCPFastOpenListenOverflow */
 	LINUX_MIB_TCPFASTOPENCOOKIEREQD,	/* TCPFastOpenCookieReqd */
 	LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES,	/* TCPSpuriousRtxHostQueues */
-	LINUX_MIB_LOWLATENCYRXPACKETS,		/* LowLatencyRxPackets */
+	LINUX_MIB_BUSYPOLLRXPACKETS,		/* BusyPollRxPackets */
 	__LINUX_MIB_MAX
 };
 