Diffstat (limited to 'include')
-rw-r--r--  include/linux/blk-mq.h           |   1
-rw-r--r--  include/linux/flex_proportions.h |   5
-rw-r--r--  include/linux/percpu-refcount.h  | 122
-rw-r--r--  include/linux/percpu.h           |  13
-rw-r--r--  include/linux/percpu_counter.h   |  10
-rw-r--r--  include/linux/proportions.h      |   5
-rw-r--r--  include/net/dst_ops.h            |   2
-rw-r--r--  include/net/inet_frag.h          |   2
8 files changed, 104 insertions, 56 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a1e31f274fcd..c13a0c09faea 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,6 +140,7 @@ enum {
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/spinlock.h>
 #include <linux/seqlock.h>
+#include <linux/gfp.h>
 
 /*
  * When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
 	seqcount_t sequence;
 };
 
-int fprop_global_init(struct fprop_global *p);
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
 void fprop_global_destroy(struct fprop_global *p);
 bool fprop_new_period(struct fprop_global *p, int periods);
 
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
 	raw_spinlock_t lock;	/* Protect period and numerator */
 };
 
-int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
 void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
 void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
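
The flex_proportions initializers now thread a gfp_t through to their internal percpu allocations. A minimal caller-side sketch of the new signatures, using only functions declared above (the variable names and the surrounding setup function are invented for illustration):

#include <linux/flex_proportions.h>
#include <linux/gfp.h>

/* hypothetical caller-side state, for illustration only */
static struct fprop_global frag_events;
static struct fprop_local_percpu my_frag_events;

static int example_setup(void)
{
	int err;

	/* the gfp argument controls how the backing percpu counters are allocated */
	err = fprop_global_init(&frag_events, GFP_KERNEL);
	if (err)
		return err;

	err = fprop_local_init_percpu(&my_frag_events, GFP_KERNEL);
	if (err)
		fprop_global_destroy(&frag_events);
	return err;
}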
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 68a64f11ce02..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
  *
  * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
  * than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
  *
  * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
  * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -49,29 +49,60 @@
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/gfp.h>
 
 struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
+	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+	__PERCPU_REF_FLAG_BITS	= 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+	/*
+	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
+	 * operation using percpu_ref_switch_to_percpu().  If initialized
+	 * with this flag, the ref will stay in atomic mode until
+	 * percpu_ref_switch_to_percpu() is invoked on it.
+	 */
+	PERCPU_REF_INIT_ATOMIC	= 1 << 0,
+
+	/*
+	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
+	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
+	 */
+	PERCPU_REF_INIT_DEAD	= 1 << 1,
+};
+
 struct percpu_ref {
-	atomic_t		count;
+	atomic_long_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned long		pcpu_count_ptr;
+	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
+	bool			force_atomic:1;
 	struct rcu_head		rcu;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
-				 percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+				 percpu_ref_func_t *release, unsigned int flags,
+				 gfp_t gfp);
 void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
-void __percpu_ref_kill_expedited(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
@@ -88,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_DEAD		1
-
 /*
  * Internal helper.  Don't use outside percpu-refcount proper.  The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
-	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
-	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
 	return true;
 }
 
@@ -115,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  * percpu_ref_get - increment a percpu refcount
  * @ref: percpu_ref to get
  *
- * Analagous to atomic_inc().
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_inc(*percpu_count);
 	else
-		atomic_inc(&ref->count);
+		atomic_long_inc(&ref->count);
 
 	rcu_read_unlock_sched();
 }
@@ -138,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * Increment a percpu refcount unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
-	int ret = false;
+	unsigned long __percpu *percpu_count;
+	int ret;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
-		ret = atomic_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -166,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  * Increment a percpu refcount unless it has already been killed.  Returns
  * %true on success; %false on failure.
  *
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail.  For such guarantee, percpu_ref_kill_and_confirm() should be
- * used.  After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used.  After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
+	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -196,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  *
  * Decrement the refcount, and if 0, call the release function (which was passed
  * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_dec(*pcpu_count);
-	else if (unlikely(atomic_dec_and_test(&ref->count)))
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_dec(*percpu_count);
+	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
@@ -216,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  * @ref: percpu_ref to test
  *
  * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
-	return !atomic_read(&ref->count);
+	return !atomic_long_read(&ref->count);
 }
 
 #endif
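
With the reworked interface above, percpu_ref_init() takes an initialization-flags argument plus a gfp mask, and a ref can later be switched between atomic and percpu mode. A hedged sketch of the intended lifecycle, built only from declarations shown in this header; the containing object, the release callback, and the example_* function names are invented for illustration:

#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static struct percpu_ref example_ref;	/* illustrative only */

static void example_release(struct percpu_ref *ref)
{
	/* called once the last reference is dropped after percpu_ref_kill() */
}

static int example_start(void)
{
	/* start in atomic mode; the percpu counters are allocated with GFP_KERNEL */
	return percpu_ref_init(&example_ref, example_release,
			       PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}

static void example_go_fast(void)
{
	/* opt into percpu (fast-path) operation once setup is finished */
	percpu_ref_switch_to_percpu(&example_ref);
}

static void example_stop(void)
{
	percpu_ref_kill(&example_ref);	/* drop the initial ref */
	/* ... wait until example_release() has run ... */
	percpu_ref_exit(&example_ref);
}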
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..a3aa63e47637 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,9 +48,9 @@
  * intelligent way to determine this would be nice.
  */
 #if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
+#define PERCPU_DYNAMIC_RESERVE		(28 << 10)
 #else
-#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
+#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
 #endif
 
 extern void *pcpu_base_addr;
@@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void);
 #endif
 extern void __init percpu_init_late(void);
 
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
-#define alloc_percpu(type)	\
-	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
+#define alloc_percpu_gfp(type, gfp)					\
+	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
+						    __alignof__(type), gfp)
+#define alloc_percpu(type)						\
+	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
+						__alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
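
alloc_percpu() keeps its existing behaviour while the new alloc_percpu_gfp() lets the caller choose the gfp mask, e.g. a non-sleeping allocation from atomic context. A small illustrative sketch; the struct and function names are invented for the example:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

struct example_stats {		/* hypothetical per-CPU payload */
	u64 hits;
	u64 misses;
};

static struct example_stats __percpu *example_alloc_stats(bool can_sleep)
{
	if (can_sleep)
		return alloc_percpu(struct example_stats);
	/* percpu memory allocated without sleeping; may fail more easily */
	return alloc_percpu_gfp(struct example_stats, GFP_NOWAIT);
}

The result is freed with free_percpu() in either case.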
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
 #include <linux/threads.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <linux/gfp.h>
 
 #ifdef CONFIG_SMP
 
@@ -26,14 +27,14 @@ struct percpu_counter {
 
 extern int percpu_counter_batch;
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key);
 
-#define percpu_counter_init(fbc, value)					\
+#define percpu_counter_init(fbc, value, gfp)				\
 	({								\
 		static struct lock_class_key __key;			\
 									\
-		__percpu_counter_init(fbc, value, &__key);		\
+		__percpu_counter_init(fbc, value, gfp, &__key);		\
 	})
 
 void percpu_counter_destroy(struct percpu_counter *fbc);
@@ -89,7 +90,8 @@ struct percpu_counter {
 	s64 count;
 };
 
-static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+				      gfp_t gfp)
 {
 	fbc->count = amount;
 	return 0;
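
percpu_counter_init() now takes a gfp mask in both the SMP and UP variants, as the net/ call sites further below show with GFP_KERNEL. A minimal init/teardown sketch with the new signature; the counter and function names are illustrative only:

#include <linux/percpu_counter.h>
#include <linux/gfp.h>

static struct percpu_counter example_events;	/* illustrative only */

static int example_counter_setup(void)
{
	/* the third argument selects how the per-CPU counters are allocated */
	int err = percpu_counter_init(&example_events, 0, GFP_KERNEL);

	if (err)
		return err;

	percpu_counter_inc(&example_events);
	return 0;
}

static void example_counter_teardown(void)
{
	percpu_counter_destroy(&example_events);
}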
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/gfp.h>
 
 struct prop_global {
 	/*
@@ -40,7 +41,7 @@ struct prop_descriptor {
 	struct mutex mutex;		/* serialize the prop_global switch */
 };
 
-int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
 void prop_change_shift(struct prop_descriptor *pd, int new_shift);
 
 /*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
 	raw_spinlock_t lock;		/* protect the snapshot state */
 };
 
-int prop_local_init_percpu(struct prop_local_percpu *pl);
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
 void prop_local_destroy_percpu(struct prop_local_percpu *pl);
 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 2f26dfb8450e..1f99a1de0e4f 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -63,7 +63,7 @@ static inline void dst_entries_add(struct dst_ops *dst, int val)
 
 static inline int dst_entries_init(struct dst_ops *dst)
 {
-	return percpu_counter_init(&dst->pcpuc_entries, 0);
+	return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
 }
 
 static inline void dst_entries_destroy(struct dst_ops *dst)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 65a8855e99fe..8d1765577acc 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -151,7 +151,7 @@ static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
 
 static inline void init_frag_mem_limit(struct netns_frags *nf)
 {
-	percpu_counter_init(&nf->mem, 0);
+	percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
 }
 
 static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)