Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	91
1 files changed, 66 insertions, 25 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e344bbe63ec..55f5ee7cc3d3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -243,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!(
 	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 	 (task->flags & PF_FROZEN) == 0)
 
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+
+#define __set_task_state(tsk, state_value)			\
+	do {							\
+		(tsk)->task_state_change = _THIS_IP_;		\
+		(tsk)->state = (state_value);			\
+	} while (0)
+#define set_task_state(tsk, state_value)			\
+	do {							\
+		(tsk)->task_state_change = _THIS_IP_;		\
+		set_mb((tsk)->state, (state_value));		\
+	} while (0)
+
+/*
+ * set_current_state() includes a barrier so that the write of current->state
+ * is correctly serialised wrt the caller's subsequent test of whether to
+ * actually sleep:
+ *
+ *	set_current_state(TASK_UNINTERRUPTIBLE);
+ *	if (do_i_need_to_sleep())
+ *		schedule();
+ *
+ * If the caller does not need such serialisation then use __set_current_state()
+ */
+#define __set_current_state(state_value)			\
+	do {							\
+		current->task_state_change = _THIS_IP_;		\
+		current->state = (state_value);			\
+	} while (0)
+#define set_current_state(state_value)				\
+	do {							\
+		current->task_state_change = _THIS_IP_;		\
+		set_mb(current->state, (state_value));		\
+	} while (0)
+
+#else
+
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
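For illustration, the serialisation rule in the comment expands to the classic wait-loop pattern below; this is a minimal sketch, where my_event_pending and wait_for_my_event() are illustrative names, not part of this patch. Under CONFIG_DEBUG_ATOMIC_SLEEP, each set_current_state() here additionally records _THIS_IP_ in current->task_state_change, so a later nested-sleep warning can name the site that last changed the task state.

/*
 * Hedged sketch of the wait loop the comment above describes;
 * my_event_pending and wait_for_my_event() are illustrative.
 */
#include <linux/sched.h>

static int my_event_pending;	/* assumed to be set by some waker */

static void wait_for_my_event(void)
{
	for (;;) {
		/* Store the state (with a barrier) before testing the
		 * condition, so a concurrent waker cannot be missed
		 * between the test and the sleep. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_event_pending)
			break;
		schedule();
	}
	/* No serialisation needed when marking ourselves runnable. */
	__set_current_state(TASK_RUNNING);
}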
@@ -259,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!(
  *
  * If the caller does not need such serialisation then use __set_current_state()
  */
 #define __set_current_state(state_value)		\
 	do { current->state = (state_value); } while (0)
 #define set_current_state(state_value)			\
 	set_mb(current->state, (state_value))
 
+#endif
+
 /* Task command name length */
 #define TASK_COMM_LEN 16
 
@@ -1278,9 +1317,9 @@ struct task_struct {
 	union rcu_special rcu_read_unlock_special;
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TASKS_RCU
 	unsigned long rcu_tasks_nvcsw;
 	bool rcu_tasks_holdout;
@@ -1558,28 +1597,23 @@ struct task_struct {
 	struct numa_group *numa_group;
 
 	/*
-	 * Exponential decaying average of faults on a per-node basis.
-	 * Scheduling placement decisions are made based on the these counts.
-	 * The values remain static for the duration of a PTE scan
+	 * numa_faults is an array split into four regions:
+	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
+	 * in this precise order.
+	 *
+	 * faults_memory: Exponential decaying average of faults on a per-node
+	 * basis. Scheduling placement decisions are made based on these
+	 * counts. The values remain static for the duration of a PTE scan.
+	 * faults_cpu: Track the nodes the process was running on when a NUMA
+	 * hinting fault was incurred.
+	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
+	 * during the current scan window. When the scan completes, the counts
+	 * in faults_memory and faults_cpu decay and these values are copied.
 	 */
-	unsigned long *numa_faults_memory;
+	unsigned long *numa_faults;
 	unsigned long total_numa_faults;
 
 	/*
-	 * numa_faults_buffer records faults per node during the current
-	 * scan window. When the scan completes, the counts in
-	 * numa_faults_memory decay and these values are copied.
-	 */
-	unsigned long *numa_faults_buffer_memory;
-
-	/*
-	 * Track the nodes the process was running on when a NUMA hinting
-	 * fault was incurred.
-	 */
-	unsigned long *numa_faults_cpu;
-	unsigned long *numa_faults_buffer_cpu;
-
-	/*
 	 * numa_faults_locality tracks if faults recorded during the last
 	 * scan window were remote/local. The task scan period is adapted
 	 * based on the locality of the faults with different weights
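The consolidated numa_faults array replaces four separate allocations with one, so every lookup becomes a single index computation over the four regions the comment lists. A minimal sketch of such an indexing helper, assuming nr_node_ids nodes and NR_NUMA_HINT_FAULT_TYPES (shared/private) counters per node; the enum order mirrors the region order above:

/* Hedged sketch: flat indexing into the four regions of numa_faults. */
enum numa_faults_stats {
	NUMA_MEM = 0,	/* faults_memory        */
	NUMA_CPU,	/* faults_cpu           */
	NUMA_MEMBUF,	/* faults_memory_buffer */
	NUMA_CPUBUF	/* faults_cpu_buffer    */
};

static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

With this layout, p->numa_faults[task_faults_idx(NUMA_MEMBUF, nid, priv)] takes the place of the old p->numa_faults_buffer_memory dereference.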
@@ -1661,6 +1695,9 @@ struct task_struct {
 	unsigned int	sequential_io;
 	unsigned int	sequential_io_avg;
 #endif
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	unsigned long	task_state_change;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2052,6 +2089,10 @@ static inline void tsk_restore_flags(struct task_struct *task,
 	task->flags |= orig_flags & flags;
 }
 
+extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+				     const struct cpumask *trial);
+extern int task_can_attach(struct task_struct *p,
+			   const struct cpumask *cs_cpus_allowed);
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p,
 				const struct cpumask *new_mask);
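The two declarations land outside the CONFIG_SMP block so cpuset code can call them unconditionally: task_can_attach() vets a task (notably a SCHED_DEADLINE one) against a destination cpumask before a move is committed, and cpuset_cpumask_can_shrink() checks whether shrinking a cpuset from cur to trial still leaves room for tasks already admitted. A hedged sketch of a caller follows; attach_to_cpuset() is an illustrative name, not the real cpuset function:

/* Hedged sketch of an attach path using the new hooks. */
static int attach_to_cpuset(struct task_struct *p,
			    const struct cpumask *cs_cpus_allowed)
{
	int ret;

	/* May fail e.g. when a deadline task's bandwidth cannot be
	 * accommodated on the destination CPUs. */
	ret = task_can_attach(p, cs_cpus_allowed);
	if (ret)
		return ret;

	do_set_cpus_allowed(p, cs_cpus_allowed);
	return 0;
}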
@@ -2760,7 +2801,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 extern int _cond_resched(void);
 
 #define cond_resched() ({			\
-	__might_sleep(__FILE__, __LINE__, 0);	\
+	___might_sleep(__FILE__, __LINE__, 0);	\
 	_cond_resched();			\
 })
 
@@ -2773,14 +2814,14 @@ extern int __cond_resched_lock(spinlock_t *lock);
 #endif
 
 #define cond_resched_lock(lock) ({				\
-	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\
 })
 
 extern int __cond_resched_softirq(void);
 
 #define cond_resched_softirq() ({					\
-	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
+	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
 	__cond_resched_softirq();					\
 })
 
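The extra leading underscore is the whole change in these hunks: cond_resched() and friends now call ___might_sleep(), the variant of the debug check without the new nested-sleep/task-state test, since these helpers may legitimately be invoked while the caller already has a sleep state set. Usage is unchanged; a minimal sketch, with struct my_item and handle_one() as illustrative stand-ins:

/* Hedged sketch: voluntary preemption in a long traversal. */
#include <linux/list.h>
#include <linux/sched.h>

struct my_item {
	struct list_head node;
	/* ... payload ... */
};

static void handle_one(struct my_item *it)
{
	/* per-item work (illustrative) */
}

static void process_items(struct list_head *items)
{
	struct my_item *it;

	list_for_each_entry(it, items, node) {
		handle_one(it);
		/* Reschedule if needed; with CONFIG_DEBUG_ATOMIC_SLEEP
		 * this also runs the ___might_sleep() check. */
		cond_resched();
	}
}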