diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-28 20:19:27 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-28 20:19:28 -0400 |
| commit | 532bfc851a7475fb6a36c1e953aa395798a7cca7 (patch) | |
| tree | a7892e5a31330dd59f31959efbe9fda1803784fd /include/linux | |
| parent | 0195c00244dc2e9f522475868fa278c473ba7339 (diff) | |
| parent | 8da00edc1069f01c34510fa405dc15d96c090a3f (diff) | |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge third batch of patches from Andrew Morton:
- Some MM stragglers
- core SMP library cleanups (on_each_cpu_mask)
- Some IPI optimisations
- kexec
- kdump
- IPMI
- the radix-tree iterator work
- various other misc bits.
"That'll do for -rc1. I still have ~10 patches for 3.4, will send
those along when they've baked a little more."
* emailed from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
backlight: fix typo in tosa_lcd.c
crc32: add help text for the algorithm select option
mm: move hugepage test examples to tools/testing/selftests/vm
mm: move slabinfo.c to tools/vm
mm: move page-types.c from Documentation to tools/vm
selftests/Makefile: make `run_tests' depend on `all'
selftests: launch individual selftests from the main Makefile
radix-tree: use iterators in find_get_pages* functions
radix-tree: rewrite gang lookup using iterator
radix-tree: introduce bit-optimized iterator
fs/proc/namespaces.c: prevent crash when ns_entries[] is empty
nbd: rename the nbd_device variable from lo to nbd
pidns: add reboot_pid_ns() to handle the reboot syscall
sysctl: use bitmap library functions
ipmi: use locks on watchdog timeout set on reboot
ipmi: simplify locking
ipmi: fix message handling during panics
ipmi: use a tasklet for handling received messages
ipmi: increase KCS timeouts
ipmi: decrease the IPMI message transaction time in interrupt mode
...
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/cpumask.h | 3 | ||||
| -rw-r--r-- | include/linux/mm.h | 2 | ||||
| -rw-r--r-- | include/linux/pid_namespace.h | 8 | ||||
| -rw-r--r-- | include/linux/radix-tree.h | 196 | ||||
| -rw-r--r-- | include/linux/smp.h | 46 | ||||
| -rw-r--r-- | include/linux/swap.h | 3 |
6 files changed, 254 insertions, 4 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 7b9b75a529be..1ffdb9856bb9 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
| @@ -810,11 +810,10 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) | |||
| 810 | #else /* NR_CPUS > 1 */ | 810 | #else /* NR_CPUS > 1 */ |
| 811 | int __first_cpu(const cpumask_t *srcp); | 811 | int __first_cpu(const cpumask_t *srcp); |
| 812 | int __next_cpu(int n, const cpumask_t *srcp); | 812 | int __next_cpu(int n, const cpumask_t *srcp); |
| 813 | int __any_online_cpu(const cpumask_t *mask); | ||
| 814 | 813 | ||
| 815 | #define first_cpu(src) __first_cpu(&(src)) | 814 | #define first_cpu(src) __first_cpu(&(src)) |
| 816 | #define next_cpu(n, src) __next_cpu((n), &(src)) | 815 | #define next_cpu(n, src) __next_cpu((n), &(src)) |
| 817 | #define any_online_cpu(mask) __any_online_cpu(&(mask)) | 816 | #define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) |
| 818 | #define for_each_cpu_mask(cpu, mask) \ | 817 | #define for_each_cpu_mask(cpu, mask) \ |
| 819 | for ((cpu) = -1; \ | 818 | for ((cpu) = -1; \ |
| 820 | (cpu) = next_cpu((cpu), (mask)), \ | 819 | (cpu) = next_cpu((cpu), (mask)), \ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f2a60dde8c9e..d8738a464b94 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -954,7 +954,7 @@ extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); | |||
| 954 | extern void truncate_setsize(struct inode *inode, loff_t newsize); | 954 | extern void truncate_setsize(struct inode *inode, loff_t newsize); |
| 955 | extern int vmtruncate(struct inode *inode, loff_t offset); | 955 | extern int vmtruncate(struct inode *inode, loff_t offset); |
| 956 | extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); | 956 | extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); |
| 957 | 957 | void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); | |
| 958 | int truncate_inode_page(struct address_space *mapping, struct page *page); | 958 | int truncate_inode_page(struct address_space *mapping, struct page *page); |
| 959 | int generic_error_remove_page(struct address_space *mapping, struct page *page); | 959 | int generic_error_remove_page(struct address_space *mapping, struct page *page); |
| 960 | 960 | ||
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f5bd679be46b..b067bd8c49d0 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h | |||
| @@ -33,6 +33,7 @@ struct pid_namespace { | |||
| 33 | #endif | 33 | #endif |
| 34 | gid_t pid_gid; | 34 | gid_t pid_gid; |
| 35 | int hide_pid; | 35 | int hide_pid; |
| 36 | int reboot; /* group exit code if this pidns was rebooted */ | ||
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 38 | extern struct pid_namespace init_pid_ns; | 39 | extern struct pid_namespace init_pid_ns; |
| @@ -48,6 +49,7 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) | |||
| 48 | extern struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *ns); | 49 | extern struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *ns); |
| 49 | extern void free_pid_ns(struct kref *kref); | 50 | extern void free_pid_ns(struct kref *kref); |
| 50 | extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); | 51 | extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); |
| 52 | extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd); | ||
| 51 | 53 | ||
| 52 | static inline void put_pid_ns(struct pid_namespace *ns) | 54 | static inline void put_pid_ns(struct pid_namespace *ns) |
| 53 | { | 55 | { |
| @@ -75,11 +77,15 @@ static inline void put_pid_ns(struct pid_namespace *ns) | |||
| 75 | { | 77 | { |
| 76 | } | 78 | } |
| 77 | 79 | ||
| 78 | |||
| 79 | static inline void zap_pid_ns_processes(struct pid_namespace *ns) | 80 | static inline void zap_pid_ns_processes(struct pid_namespace *ns) |
| 80 | { | 81 | { |
| 81 | BUG(); | 82 | BUG(); |
| 82 | } | 83 | } |
| 84 | |||
| 85 | static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) | ||
| 86 | { | ||
| 87 | return 0; | ||
| 88 | } | ||
| 83 | #endif /* CONFIG_PID_NS */ | 89 | #endif /* CONFIG_PID_NS */ |
| 84 | 90 | ||
| 85 | extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); | 91 | extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index e9a48234e693..0d04cd69ab9b 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | * Copyright (C) 2001 Momchil Velikov | 2 | * Copyright (C) 2001 Momchil Velikov |
| 3 | * Portions Copyright (C) 2001 Christoph Hellwig | 3 | * Portions Copyright (C) 2001 Christoph Hellwig |
| 4 | * Copyright (C) 2006 Nick Piggin | 4 | * Copyright (C) 2006 Nick Piggin |
| 5 | * Copyright (C) 2012 Konstantin Khlebnikov | ||
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License as | 8 | * modify it under the terms of the GNU General Public License as |
| @@ -257,4 +258,199 @@ static inline void radix_tree_preload_end(void) | |||
| 257 | preempt_enable(); | 258 | preempt_enable(); |
| 258 | } | 259 | } |
| 259 | 260 | ||
| 261 | /** | ||
| 262 | * struct radix_tree_iter - radix tree iterator state | ||
| 263 | * | ||
| 264 | * @index: index of current slot | ||
| 265 | * @next_index: next-to-last index for this chunk | ||
| 266 | * @tags: bit-mask for tag-iterating | ||
| 267 | * | ||
| 268 | * This radix tree iterator works in terms of "chunks" of slots. A chunk is a | ||
| 269 | * subinterval of slots contained within one radix tree leaf node. It is | ||
| 270 | * described by a pointer to its first slot and a struct radix_tree_iter | ||
| 271 | * which holds the chunk's position in the tree and its size. For tagged | ||
| 272 | * iteration radix_tree_iter also holds the slots' bit-mask for one chosen | ||
| 273 | * radix tree tag. | ||
| 274 | */ | ||
| 275 | struct radix_tree_iter { | ||
| 276 | unsigned long index; | ||
| 277 | unsigned long next_index; | ||
| 278 | unsigned long tags; | ||
| 279 | }; | ||
| 280 | |||
| 281 | #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ | ||
| 282 | #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ | ||
| 283 | #define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ | ||
| 284 | |||
| 285 | /** | ||
| 286 | * radix_tree_iter_init - initialize radix tree iterator | ||
| 287 | * | ||
| 288 | * @iter: pointer to iterator state | ||
| 289 | * @start: iteration starting index | ||
| 290 | * Returns: NULL | ||
| 291 | */ | ||
| 292 | static __always_inline void ** | ||
| 293 | radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) | ||
| 294 | { | ||
| 295 | /* | ||
| 296 | * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it | ||
| 297 | * in the case of a successful tagged chunk lookup. If the lookup was | ||
| 298 | * unsuccessful or non-tagged then nobody cares about ->tags. | ||
| 299 | * | ||
| 300 | * Set index to zero to bypass next_index overflow protection. | ||
| 301 | * See the comment in radix_tree_next_chunk() for details. | ||
| 302 | */ | ||
| 303 | iter->index = 0; | ||
| 304 | iter->next_index = start; | ||
| 305 | return NULL; | ||
| 306 | } | ||
| 307 | |||
| 308 | /** | ||
| 309 | * radix_tree_next_chunk - find next chunk of slots for iteration | ||
| 310 | * | ||
| 311 | * @root: radix tree root | ||
| 312 | * @iter: iterator state | ||
| 313 | * @flags: RADIX_TREE_ITER_* flags and tag index | ||
| 314 | * Returns: pointer to chunk first slot, or NULL if there are no more left | ||
| 315 | * | ||
| 316 | * This function looks up the next chunk in the radix tree starting from | ||
| 317 | * @iter->next_index. It returns a pointer to the chunk's first slot. | ||
| 318 | * Also it fills @iter with data about chunk: position in the tree (index), | ||
| 319 | * its end (next_index), and constructs a bit mask for tagged iterating (tags). | ||
| 320 | */ | ||
| 321 | void **radix_tree_next_chunk(struct radix_tree_root *root, | ||
| 322 | struct radix_tree_iter *iter, unsigned flags); | ||
| 323 | |||
| 324 | /** | ||
| 325 | * radix_tree_chunk_size - get current chunk size | ||
| 326 | * | ||
| 327 | * @iter: pointer to radix tree iterator | ||
| 328 | * Returns: current chunk size | ||
| 329 | */ | ||
| 330 | static __always_inline unsigned | ||
| 331 | radix_tree_chunk_size(struct radix_tree_iter *iter) | ||
| 332 | { | ||
| 333 | return iter->next_index - iter->index; | ||
| 334 | } | ||
| 335 | |||
| 336 | /** | ||
| 337 | * radix_tree_next_slot - find next slot in chunk | ||
| 338 | * | ||
| 339 | * @slot: pointer to current slot | ||
| 340 | * @iter: pointer to iterator state | ||
| 341 | * @flags: RADIX_TREE_ITER_*, should be constant | ||
| 342 | * Returns: pointer to next slot, or NULL if there are no more left | ||
| 343 | * | ||
| 344 | * This function updates @iter->index in the case of a successful lookup. | ||
| 345 | * For tagged lookup it also eats @iter->tags. | ||
| 346 | */ | ||
| 347 | static __always_inline void ** | ||
| 348 | radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | ||
| 349 | { | ||
| 350 | if (flags & RADIX_TREE_ITER_TAGGED) { | ||
| 351 | iter->tags >>= 1; | ||
| 352 | if (likely(iter->tags & 1ul)) { | ||
| 353 | iter->index++; | ||
| 354 | return slot + 1; | ||
| 355 | } | ||
| 356 | if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) { | ||
| 357 | unsigned offset = __ffs(iter->tags); | ||
| 358 | |||
| 359 | iter->tags >>= offset; | ||
| 360 | iter->index += offset + 1; | ||
| 361 | return slot + offset + 1; | ||
| 362 | } | ||
| 363 | } else { | ||
| 364 | unsigned size = radix_tree_chunk_size(iter) - 1; | ||
| 365 | |||
| 366 | while (size--) { | ||
| 367 | slot++; | ||
| 368 | iter->index++; | ||
| 369 | if (likely(*slot)) | ||
| 370 | return slot; | ||
| 371 | if (flags & RADIX_TREE_ITER_CONTIG) | ||
| 372 | break; | ||
| 373 | } | ||
| 374 | } | ||
| 375 | return NULL; | ||
| 376 | } | ||
| 377 | |||
| 378 | /** | ||
| 379 | * radix_tree_for_each_chunk - iterate over chunks | ||
| 380 | * | ||
| 381 | * @slot: the void** variable for pointer to chunk first slot | ||
| 382 | * @root: the struct radix_tree_root pointer | ||
| 383 | * @iter: the struct radix_tree_iter pointer | ||
| 384 | * @start: iteration starting index | ||
| 385 | * @flags: RADIX_TREE_ITER_* and tag index | ||
| 386 | * | ||
| 387 | * Locks can be released and reacquired between iterations. | ||
| 388 | */ | ||
| 389 | #define radix_tree_for_each_chunk(slot, root, iter, start, flags) \ | ||
| 390 | for (slot = radix_tree_iter_init(iter, start) ; \ | ||
| 391 | (slot = radix_tree_next_chunk(root, iter, flags)) ;) | ||
| 392 | |||
| 393 | /** | ||
| 394 | * radix_tree_for_each_chunk_slot - iterate over slots in one chunk | ||
| 395 | * | ||
| 396 | * @slot: the void** variable, at the beginning points to chunk first slot | ||
| 397 | * @iter: the struct radix_tree_iter pointer | ||
| 398 | * @flags: RADIX_TREE_ITER_*, should be constant | ||
| 399 | * | ||
| 400 | * This macro is designed to be nested inside radix_tree_for_each_chunk(). | ||
| 401 | * @slot points to the radix tree slot, @iter->index contains its index. | ||
| 402 | */ | ||
| 403 | #define radix_tree_for_each_chunk_slot(slot, iter, flags) \ | ||
| 404 | for (; slot ; slot = radix_tree_next_slot(slot, iter, flags)) | ||
| 405 | |||
| 406 | /** | ||
| 407 | * radix_tree_for_each_slot - iterate over non-empty slots | ||
| 408 | * | ||
| 409 | * @slot: the void** variable for pointer to slot | ||
| 410 | * @root: the struct radix_tree_root pointer | ||
| 411 | * @iter: the struct radix_tree_iter pointer | ||
| 412 | * @start: iteration starting index | ||
| 413 | * | ||
| 414 | * @slot points to radix tree slot, @iter->index contains its index. | ||
| 415 | */ | ||
| 416 | #define radix_tree_for_each_slot(slot, root, iter, start) \ | ||
| 417 | for (slot = radix_tree_iter_init(iter, start) ; \ | ||
| 418 | slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ | ||
| 419 | slot = radix_tree_next_slot(slot, iter, 0)) | ||
| 420 | |||
| 421 | /** | ||
| 422 | * radix_tree_for_each_contig - iterate over contiguous slots | ||
| 423 | * | ||
| 424 | * @slot: the void** variable for pointer to slot | ||
| 425 | * @root: the struct radix_tree_root pointer | ||
| 426 | * @iter: the struct radix_tree_iter pointer | ||
| 427 | * @start: iteration starting index | ||
| 428 | * | ||
| 429 | * @slot points to radix tree slot, @iter->index contains its index. | ||
| 430 | */ | ||
| 431 | #define radix_tree_for_each_contig(slot, root, iter, start) \ | ||
| 432 | for (slot = radix_tree_iter_init(iter, start) ; \ | ||
| 433 | slot || (slot = radix_tree_next_chunk(root, iter, \ | ||
| 434 | RADIX_TREE_ITER_CONTIG)) ; \ | ||
| 435 | slot = radix_tree_next_slot(slot, iter, \ | ||
| 436 | RADIX_TREE_ITER_CONTIG)) | ||
| 437 | |||
| 438 | /** | ||
| 439 | * radix_tree_for_each_tagged - iterate over tagged slots | ||
| 440 | * | ||
| 441 | * @slot: the void** variable for pointer to slot | ||
| 442 | * @root: the struct radix_tree_root pointer | ||
| 443 | * @iter: the struct radix_tree_iter pointer | ||
| 444 | * @start: iteration starting index | ||
| 445 | * @tag: tag index | ||
| 446 | * | ||
| 447 | * @slot points to radix tree slot, @iter->index contains its index. | ||
| 448 | */ | ||
| 449 | #define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ | ||
| 450 | for (slot = radix_tree_iter_init(iter, start) ; \ | ||
| 451 | slot || (slot = radix_tree_next_chunk(root, iter, \ | ||
| 452 | RADIX_TREE_ITER_TAGGED | tag)) ; \ | ||
| 453 | slot = radix_tree_next_slot(slot, iter, \ | ||
| 454 | RADIX_TREE_ITER_TAGGED)) | ||
| 455 | |||
| 260 | #endif /* _LINUX_RADIX_TREE_H */ | 456 | #endif /* _LINUX_RADIX_TREE_H */ |
diff --git a/include/linux/smp.h b/include/linux/smp.h index 8cc38d3bab0c..10530d92c04b 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -102,6 +102,22 @@ static inline void call_function_init(void) { } | |||
| 102 | int on_each_cpu(smp_call_func_t func, void *info, int wait); | 102 | int on_each_cpu(smp_call_func_t func, void *info, int wait); |
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| 105 | * Call a function on processors specified by mask, which might include | ||
| 106 | * the local one. | ||
| 107 | */ | ||
| 108 | void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, | ||
| 109 | void *info, bool wait); | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Call a function on each processor for which the supplied function | ||
| 113 | * cond_func returns a positive value. This may include the local | ||
| 114 | * processor. | ||
| 115 | */ | ||
| 116 | void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), | ||
| 117 | smp_call_func_t func, void *info, bool wait, | ||
| 118 | gfp_t gfp_flags); | ||
| 119 | |||
| 120 | /* | ||
| 105 | * Mark the boot cpu "online" so that it can call console drivers in | 121 | * Mark the boot cpu "online" so that it can call console drivers in |
| 106 | * printk() and can access its per-cpu storage. | 122 | * printk() and can access its per-cpu storage. |
| 107 | */ | 123 | */ |
| @@ -132,6 +148,36 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info) | |||
| 132 | local_irq_enable(); \ | 148 | local_irq_enable(); \ |
| 133 | 0; \ | 149 | 0; \ |
| 134 | }) | 150 | }) |
| 151 | /* | ||
| 152 | * Note we still need to test the mask even for UP | ||
| 153 | * because we actually can get an empty mask from | ||
| 154 | * code that on SMP might call us without the local | ||
| 155 | * CPU in the mask. | ||
| 156 | */ | ||
| 157 | #define on_each_cpu_mask(mask, func, info, wait) \ | ||
| 158 | do { \ | ||
| 159 | if (cpumask_test_cpu(0, (mask))) { \ | ||
| 160 | local_irq_disable(); \ | ||
| 161 | (func)(info); \ | ||
| 162 | local_irq_enable(); \ | ||
| 163 | } \ | ||
| 164 | } while (0) | ||
| 165 | /* | ||
| 166 | * Preemption is disabled here to make sure the cond_func is called under the | ||
| 167 | * same conditions in UP and SMP. | ||
| 168 | */ | ||
| 169 | #define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\ | ||
| 170 | do { \ | ||
| 171 | void *__info = (info); \ | ||
| 172 | preempt_disable(); \ | ||
| 173 | if ((cond_func)(0, __info)) { \ | ||
| 174 | local_irq_disable(); \ | ||
| 175 | (func)(__info); \ | ||
| 176 | local_irq_enable(); \ | ||
| 177 | } \ | ||
| 178 | preempt_enable(); \ | ||
| 179 | } while (0) | ||
| 180 | |||
| 135 | static inline void smp_send_reschedule(int cpu) { } | 181 | static inline void smp_send_reschedule(int cpu) { } |
| 136 | #define num_booting_cpus() 1 | 182 | #define num_booting_cpus() 1 |
| 137 | #define smp_prepare_boot_cpu() do {} while (0) | 183 | #define smp_prepare_boot_cpu() do {} while (0) |
diff --git a/include/linux/swap.h b/include/linux/swap.h index b86b5c20617d..8dc0ea7caf02 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -21,6 +21,9 @@ struct bio; | |||
| 21 | #define SWAP_FLAG_PRIO_SHIFT 0 | 21 | #define SWAP_FLAG_PRIO_SHIFT 0 |
| 22 | #define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ | 22 | #define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ |
| 23 | 23 | ||
| 24 | #define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ | ||
| 25 | SWAP_FLAG_DISCARD) | ||
| 26 | |||
| 24 | static inline int current_is_kswapd(void) | 27 | static inline int current_is_kswapd(void) |
| 25 | { | 28 | { |
| 26 | return current->flags & PF_KSWAPD; | 29 | return current->flags & PF_KSWAPD; |
