diff options
| author | Yehuda Sadeh <yehuda@hq.newdream.net> | 2010-06-17 19:16:12 -0400 |
|---|---|---|
| committer | Sage Weil <sage@newdream.net> | 2010-08-01 23:11:40 -0400 |
| commit | 37151668bad3fd058368752bee476f2ba3645596 (patch) | |
| tree | 6eeae77dfa1c758ff03659b5677f474a72fbe7c0 | |
| parent | 0deb01c9998f8112c5e478e3fe3a930131abbc0a (diff) | |
ceph: do caps accounting per mds_client
Caps-related accounting is now done per MDS client instead
of globally. This prepares the groundwork for a later revision
of the caps preallocated reservation list.
Signed-off-by: Yehuda Sadeh <yehuda@hq.newdream.net>
Signed-off-by: Sage Weil <sage@newdream.net>
| -rw-r--r-- | fs/ceph/caps.c | 187 | ||||
| -rw-r--r-- | fs/ceph/mds_client.c | 16 | ||||
| -rw-r--r-- | fs/ceph/mds_client.h | 22 | ||||
| -rw-r--r-- | fs/ceph/super.c | 6 | ||||
| -rw-r--r-- | fs/ceph/super.h | 15 |
5 files changed, 131 insertions, 115 deletions
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d992880d21d4..47068b10baf8 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
| @@ -113,58 +113,41 @@ const char *ceph_cap_string(int caps) | |||
| 113 | return cap_str[i]; | 113 | return cap_str[i]; |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | /* | 116 | void ceph_caps_init(struct ceph_mds_client *mdsc) |
| 117 | * Cap reservations | ||
| 118 | * | ||
| 119 | * Maintain a global pool of preallocated struct ceph_caps, referenced | ||
| 120 | * by struct ceph_caps_reservations. This ensures that we preallocate | ||
| 121 | * memory needed to successfully process an MDS response. (If an MDS | ||
| 122 | * sends us cap information and we fail to process it, we will have | ||
| 123 | * problems due to the client and MDS being out of sync.) | ||
| 124 | * | ||
| 125 | * Reservations are 'owned' by a ceph_cap_reservation context. | ||
| 126 | */ | ||
| 127 | static spinlock_t caps_list_lock; | ||
| 128 | static struct list_head caps_list; /* unused (reserved or unreserved) */ | ||
| 129 | static int caps_total_count; /* total caps allocated */ | ||
| 130 | static int caps_use_count; /* in use */ | ||
| 131 | static int caps_reserve_count; /* unused, reserved */ | ||
| 132 | static int caps_avail_count; /* unused, unreserved */ | ||
| 133 | static int caps_min_count; /* keep at least this many (unreserved) */ | ||
| 134 | |||
| 135 | void __init ceph_caps_init(void) | ||
| 136 | { | 117 | { |
| 137 | INIT_LIST_HEAD(&caps_list); | 118 | INIT_LIST_HEAD(&mdsc->caps_list); |
| 138 | spin_lock_init(&caps_list_lock); | 119 | spin_lock_init(&mdsc->caps_list_lock); |
| 139 | } | 120 | } |
| 140 | 121 | ||
| 141 | void ceph_caps_finalize(void) | 122 | void ceph_caps_finalize(struct ceph_mds_client *mdsc) |
| 142 | { | 123 | { |
| 143 | struct ceph_cap *cap; | 124 | struct ceph_cap *cap; |
| 144 | 125 | ||
| 145 | spin_lock(&caps_list_lock); | 126 | spin_lock(&mdsc->caps_list_lock); |
| 146 | while (!list_empty(&caps_list)) { | 127 | while (!list_empty(&mdsc->caps_list)) { |
| 147 | cap = list_first_entry(&caps_list, struct ceph_cap, caps_item); | 128 | cap = list_first_entry(&mdsc->caps_list, |
| 129 | struct ceph_cap, caps_item); | ||
| 148 | list_del(&cap->caps_item); | 130 | list_del(&cap->caps_item); |
| 149 | kmem_cache_free(ceph_cap_cachep, cap); | 131 | kmem_cache_free(ceph_cap_cachep, cap); |
| 150 | } | 132 | } |
| 151 | caps_total_count = 0; | 133 | mdsc->caps_total_count = 0; |
| 152 | caps_avail_count = 0; | 134 | mdsc->caps_avail_count = 0; |
| 153 | caps_use_count = 0; | 135 | mdsc->caps_use_count = 0; |
| 154 | caps_reserve_count = 0; | 136 | mdsc->caps_reserve_count = 0; |
| 155 | caps_min_count = 0; | 137 | mdsc->caps_min_count = 0; |
| 156 | spin_unlock(&caps_list_lock); | 138 | spin_unlock(&mdsc->caps_list_lock); |
| 157 | } | 139 | } |
| 158 | 140 | ||
| 159 | void ceph_adjust_min_caps(int delta) | 141 | void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta) |
| 160 | { | 142 | { |
| 161 | spin_lock(&caps_list_lock); | 143 | spin_lock(&mdsc->caps_list_lock); |
| 162 | caps_min_count += delta; | 144 | mdsc->caps_min_count += delta; |
| 163 | BUG_ON(caps_min_count < 0); | 145 | BUG_ON(mdsc->caps_min_count < 0); |
| 164 | spin_unlock(&caps_list_lock); | 146 | spin_unlock(&mdsc->caps_list_lock); |
| 165 | } | 147 | } |
| 166 | 148 | ||
| 167 | int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need) | 149 | int ceph_reserve_caps(struct ceph_mds_client *mdsc, |
| 150 | struct ceph_cap_reservation *ctx, int need) | ||
| 168 | { | 151 | { |
| 169 | int i; | 152 | int i; |
| 170 | struct ceph_cap *cap; | 153 | struct ceph_cap *cap; |
| @@ -176,16 +159,17 @@ int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need) | |||
| 176 | dout("reserve caps ctx=%p need=%d\n", ctx, need); | 159 | dout("reserve caps ctx=%p need=%d\n", ctx, need); |
| 177 | 160 | ||
| 178 | /* first reserve any caps that are already allocated */ | 161 | /* first reserve any caps that are already allocated */ |
| 179 | spin_lock(&caps_list_lock); | 162 | spin_lock(&mdsc->caps_list_lock); |
| 180 | if (caps_avail_count >= need) | 163 | if (mdsc->caps_avail_count >= need) |
| 181 | have = need; | 164 | have = need; |
| 182 | else | 165 | else |
| 183 | have = caps_avail_count; | 166 | have = mdsc->caps_avail_count; |
| 184 | caps_avail_count -= have; | 167 | mdsc->caps_avail_count -= have; |
| 185 | caps_reserve_count += have; | 168 | mdsc->caps_reserve_count += have; |
| 186 | BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + | 169 | BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + |
| 187 | caps_avail_count); | 170 | mdsc->caps_reserve_count + |
| 188 | spin_unlock(&caps_list_lock); | 171 | mdsc->caps_avail_count); |
| 172 | spin_unlock(&mdsc->caps_list_lock); | ||
| 189 | 173 | ||
| 190 | for (i = have; i < need; i++) { | 174 | for (i = have; i < need; i++) { |
| 191 | cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); | 175 | cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); |
| @@ -198,19 +182,20 @@ int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need) | |||
| 198 | } | 182 | } |
| 199 | BUG_ON(have + alloc != need); | 183 | BUG_ON(have + alloc != need); |
| 200 | 184 | ||
| 201 | spin_lock(&caps_list_lock); | 185 | spin_lock(&mdsc->caps_list_lock); |
| 202 | caps_total_count += alloc; | 186 | mdsc->caps_total_count += alloc; |
| 203 | caps_reserve_count += alloc; | 187 | mdsc->caps_reserve_count += alloc; |
| 204 | list_splice(&newcaps, &caps_list); | 188 | list_splice(&newcaps, &mdsc->caps_list); |
| 205 | 189 | ||
| 206 | BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + | 190 | BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + |
| 207 | caps_avail_count); | 191 | mdsc->caps_reserve_count + |
| 208 | spin_unlock(&caps_list_lock); | 192 | mdsc->caps_avail_count); |
| 193 | spin_unlock(&mdsc->caps_list_lock); | ||
| 209 | 194 | ||
| 210 | ctx->count = need; | 195 | ctx->count = need; |
| 211 | dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n", | 196 | dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n", |
| 212 | ctx, caps_total_count, caps_use_count, caps_reserve_count, | 197 | ctx, mdsc->caps_total_count, mdsc->caps_use_count, |
| 213 | caps_avail_count); | 198 | mdsc->caps_reserve_count, mdsc->caps_avail_count); |
| 214 | return 0; | 199 | return 0; |
| 215 | 200 | ||
| 216 | out_alloc_count: | 201 | out_alloc_count: |
| @@ -220,26 +205,29 @@ out_alloc_count: | |||
| 220 | return ret; | 205 | return ret; |
| 221 | } | 206 | } |
| 222 | 207 | ||
| 223 | int ceph_unreserve_caps(struct ceph_cap_reservation *ctx) | 208 | int ceph_unreserve_caps(struct ceph_mds_client *mdsc, |
| 209 | struct ceph_cap_reservation *ctx) | ||
| 224 | { | 210 | { |
| 225 | dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count); | 211 | dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count); |
| 226 | if (ctx->count) { | 212 | if (ctx->count) { |
| 227 | spin_lock(&caps_list_lock); | 213 | spin_lock(&mdsc->caps_list_lock); |
| 228 | BUG_ON(caps_reserve_count < ctx->count); | 214 | BUG_ON(mdsc->caps_reserve_count < ctx->count); |
| 229 | caps_reserve_count -= ctx->count; | 215 | mdsc->caps_reserve_count -= ctx->count; |
| 230 | caps_avail_count += ctx->count; | 216 | mdsc->caps_avail_count += ctx->count; |
| 231 | ctx->count = 0; | 217 | ctx->count = 0; |
| 232 | dout("unreserve caps %d = %d used + %d resv + %d avail\n", | 218 | dout("unreserve caps %d = %d used + %d resv + %d avail\n", |
| 233 | caps_total_count, caps_use_count, caps_reserve_count, | 219 | mdsc->caps_total_count, mdsc->caps_use_count, |
| 234 | caps_avail_count); | 220 | mdsc->caps_reserve_count, mdsc->caps_avail_count); |
| 235 | BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + | 221 | BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + |
| 236 | caps_avail_count); | 222 | mdsc->caps_reserve_count + |
| 237 | spin_unlock(&caps_list_lock); | 223 | mdsc->caps_avail_count); |
| 224 | spin_unlock(&mdsc->caps_list_lock); | ||
| 238 | } | 225 | } |
| 239 | return 0; | 226 | return 0; |
| 240 | } | 227 | } |
| 241 | 228 | ||
| 242 | static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx) | 229 | static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc, |
| 230 | struct ceph_cap_reservation *ctx) | ||
| 243 | { | 231 | { |
| 244 | struct ceph_cap *cap = NULL; | 232 | struct ceph_cap *cap = NULL; |
| 245 | 233 | ||
| @@ -247,71 +235,74 @@ static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx) | |||
| 247 | if (!ctx) { | 235 | if (!ctx) { |
| 248 | cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); | 236 | cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); |
| 249 | if (cap) { | 237 | if (cap) { |
| 250 | caps_use_count++; | 238 | mdsc->caps_use_count++; |
| 251 | caps_total_count++; | 239 | mdsc->caps_total_count++; |
| 252 | } | 240 | } |
| 253 | return cap; | 241 | return cap; |
| 254 | } | 242 | } |
| 255 | 243 | ||
| 256 | spin_lock(&caps_list_lock); | 244 | spin_lock(&mdsc->caps_list_lock); |
| 257 | dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", | 245 | dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", |
| 258 | ctx, ctx->count, caps_total_count, caps_use_count, | 246 | ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count, |
| 259 | caps_reserve_count, caps_avail_count); | 247 | mdsc->caps_reserve_count, mdsc->caps_avail_count); |
| 260 | BUG_ON(!ctx->count); | 248 | BUG_ON(!ctx->count); |
| 261 | BUG_ON(ctx->count > caps_reserve_count); | 249 | BUG_ON(ctx->count > mdsc->caps_reserve_count); |
| 262 | BUG_ON(list_empty(&caps_list)); | 250 | BUG_ON(list_empty(&mdsc->caps_list)); |
| 263 | 251 | ||
| 264 | ctx->count--; | 252 | ctx->count--; |
| 265 | caps_reserve_count--; | 253 | mdsc->caps_reserve_count--; |
| 266 | caps_use_count++; | 254 | mdsc->caps_use_count++; |
| 267 | 255 | ||
| 268 | cap = list_first_entry(&caps_list, struct ceph_cap, caps_item); | 256 | cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item); |
| 269 | list_del(&cap->caps_item); | 257 | list_del(&cap->caps_item); |
| 270 | 258 | ||
| 271 | BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + | 259 | BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + |
| 272 | caps_avail_count); | 260 | mdsc->caps_reserve_count + mdsc->caps_avail_count); |
| 273 | spin_unlock(&caps_list_lock); | 261 | spin_unlock(&mdsc->caps_list_lock); |
| 274 | return cap; | 262 | return cap; |
| 275 | } | 263 | } |
| 276 | 264 | ||
| 277 | void ceph_put_cap(struct ceph_cap *cap) | 265 | void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap) |
| 278 | { | 266 | { |
| 279 | spin_lock(&caps_list_lock); | 267 | spin_lock(&mdsc->caps_list_lock); |
| 280 | dout("put_cap %p %d = %d used + %d resv + %d avail\n", | 268 | dout("put_cap %p %d = %d used + %d resv + %d avail\n", |
| 281 | cap, caps_total_count, caps_use_count, | 269 | cap, mdsc->caps_total_count, mdsc->caps_use_count, |
| 282 | caps_reserve_count, caps_avail_count); | 270 | mdsc->caps_reserve_count, mdsc->caps_avail_count); |
| 283 | caps_use_count--; | 271 | mdsc->caps_use_count--; |
| 284 | /* | 272 | /* |
| 285 | * Keep some preallocated caps around (ceph_min_count), to | 273 | * Keep some preallocated caps around (ceph_min_count), to |
| 286 | * avoid lots of free/alloc churn. | 274 | * avoid lots of free/alloc churn. |
| 287 | */ | 275 | */ |
| 288 | if (caps_avail_count >= caps_reserve_count + caps_min_count) { | 276 | if (mdsc->caps_avail_count >= mdsc->caps_reserve_count + |
| 289 | caps_total_count--; | 277 | mdsc->caps_min_count) { |
| 278 | mdsc->caps_total_count--; | ||
| 290 | kmem_cache_free(ceph_cap_cachep, cap); | 279 | kmem_cache_free(ceph_cap_cachep, cap); |
| 291 | } else { | 280 | } else { |
| 292 | caps_avail_count++; | 281 | mdsc->caps_avail_count++; |
| 293 | list_add(&cap->caps_item, &caps_list); | 282 | list_add(&cap->caps_item, &mdsc->caps_list); |
| 294 | } | 283 | } |
| 295 | 284 | ||
| 296 | BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + | 285 | BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + |
| 297 | caps_avail_count); | 286 | mdsc->caps_reserve_count + mdsc->caps_avail_count); |
| 298 | spin_unlock(&caps_list_lock); | 287 | spin_unlock(&mdsc->caps_list_lock); |
| 299 | } | 288 | } |
| 300 | 289 | ||
| 301 | void ceph_reservation_status(struct ceph_client *client, | 290 | void ceph_reservation_status(struct ceph_client *client, |
| 302 | int *total, int *avail, int *used, int *reserved, | 291 | int *total, int *avail, int *used, int *reserved, |
| 303 | int *min) | 292 | int *min) |
| 304 | { | 293 | { |
| 294 | struct ceph_mds_client *mdsc = &client->mdsc; | ||
| 295 | |||
| 305 | if (total) | 296 | if (total) |
| 306 | *total = caps_total_count; | 297 | *total = mdsc->caps_total_count; |
| 307 | if (avail) | 298 | if (avail) |
| 308 | *avail = caps_avail_count; | 299 | *avail = mdsc->caps_avail_count; |
| 309 | if (used) | 300 | if (used) |
| 310 | *used = caps_use_count; | 301 | *used = mdsc->caps_use_count; |
| 311 | if (reserved) | 302 | if (reserved) |
| 312 | *reserved = caps_reserve_count; | 303 | *reserved = mdsc->caps_reserve_count; |
| 313 | if (min) | 304 | if (min) |
| 314 | *min = caps_min_count; | 305 | *min = mdsc->caps_min_count; |
| 315 | } | 306 | } |
| 316 | 307 | ||
| 317 | /* | 308 | /* |
| @@ -540,7 +531,7 @@ retry: | |||
| 540 | new_cap = NULL; | 531 | new_cap = NULL; |
| 541 | } else { | 532 | } else { |
| 542 | spin_unlock(&inode->i_lock); | 533 | spin_unlock(&inode->i_lock); |
| 543 | new_cap = get_cap(caps_reservation); | 534 | new_cap = get_cap(mdsc, caps_reservation); |
| 544 | if (new_cap == NULL) | 535 | if (new_cap == NULL) |
| 545 | return -ENOMEM; | 536 | return -ENOMEM; |
| 546 | goto retry; | 537 | goto retry; |
| @@ -898,7 +889,7 @@ void __ceph_remove_cap(struct ceph_cap *cap) | |||
| 898 | ci->i_auth_cap = NULL; | 889 | ci->i_auth_cap = NULL; |
| 899 | 890 | ||
| 900 | if (removed) | 891 | if (removed) |
| 901 | ceph_put_cap(cap); | 892 | ceph_put_cap(mdsc, cap); |
| 902 | 893 | ||
| 903 | if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { | 894 | if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { |
| 904 | struct ceph_snap_realm *realm = ci->i_snap_realm; | 895 | struct ceph_snap_realm *realm = ci->i_snap_realm; |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 6e40db2a0014..641a8a37e7b3 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
| @@ -449,7 +449,7 @@ void ceph_mdsc_release_request(struct kref *kref) | |||
| 449 | kfree(req->r_path1); | 449 | kfree(req->r_path1); |
| 450 | kfree(req->r_path2); | 450 | kfree(req->r_path2); |
| 451 | put_request_session(req); | 451 | put_request_session(req); |
| 452 | ceph_unreserve_caps(&req->r_caps_reservation); | 452 | ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); |
| 453 | kfree(req); | 453 | kfree(req); |
| 454 | } | 454 | } |
| 455 | 455 | ||
| @@ -512,7 +512,8 @@ static void __register_request(struct ceph_mds_client *mdsc, | |||
| 512 | { | 512 | { |
| 513 | req->r_tid = ++mdsc->last_tid; | 513 | req->r_tid = ++mdsc->last_tid; |
| 514 | if (req->r_num_caps) | 514 | if (req->r_num_caps) |
| 515 | ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps); | 515 | ceph_reserve_caps(mdsc, &req->r_caps_reservation, |
| 516 | req->r_num_caps); | ||
| 516 | dout("__register_request %p tid %lld\n", req, req->r_tid); | 517 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
| 517 | ceph_mdsc_get_request(req); | 518 | ceph_mdsc_get_request(req); |
| 518 | __insert_request(mdsc, req); | 519 | __insert_request(mdsc, req); |
| @@ -764,7 +765,7 @@ static int iterate_session_caps(struct ceph_mds_session *session, | |||
| 764 | last_inode = NULL; | 765 | last_inode = NULL; |
| 765 | } | 766 | } |
| 766 | if (old_cap) { | 767 | if (old_cap) { |
| 767 | ceph_put_cap(old_cap); | 768 | ceph_put_cap(session->s_mdsc, old_cap); |
| 768 | old_cap = NULL; | 769 | old_cap = NULL; |
| 769 | } | 770 | } |
| 770 | 771 | ||
| @@ -793,7 +794,7 @@ out: | |||
| 793 | if (last_inode) | 794 | if (last_inode) |
| 794 | iput(last_inode); | 795 | iput(last_inode); |
| 795 | if (old_cap) | 796 | if (old_cap) |
| 796 | ceph_put_cap(old_cap); | 797 | ceph_put_cap(session->s_mdsc, old_cap); |
| 797 | 798 | ||
| 798 | return ret; | 799 | return ret; |
| 799 | } | 800 | } |
| @@ -1251,6 +1252,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |||
| 1251 | return ERR_PTR(-ENOMEM); | 1252 | return ERR_PTR(-ENOMEM); |
| 1252 | 1253 | ||
| 1253 | mutex_init(&req->r_fill_mutex); | 1254 | mutex_init(&req->r_fill_mutex); |
| 1255 | req->r_mdsc = mdsc; | ||
| 1254 | req->r_started = jiffies; | 1256 | req->r_started = jiffies; |
| 1255 | req->r_resend_mds = -1; | 1257 | req->r_resend_mds = -1; |
| 1256 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | 1258 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); |
| @@ -1986,7 +1988,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |||
| 1986 | if (err == 0) { | 1988 | if (err == 0) { |
| 1987 | if (result == 0 && rinfo->dir_nr) | 1989 | if (result == 0 && rinfo->dir_nr) |
| 1988 | ceph_readdir_prepopulate(req, req->r_session); | 1990 | ceph_readdir_prepopulate(req, req->r_session); |
| 1989 | ceph_unreserve_caps(&req->r_caps_reservation); | 1991 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
| 1990 | } | 1992 | } |
| 1991 | mutex_unlock(&req->r_fill_mutex); | 1993 | mutex_unlock(&req->r_fill_mutex); |
| 1992 | 1994 | ||
| @@ -2767,6 +2769,9 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) | |||
| 2767 | spin_lock_init(&mdsc->dentry_lru_lock); | 2769 | spin_lock_init(&mdsc->dentry_lru_lock); |
| 2768 | INIT_LIST_HEAD(&mdsc->dentry_lru); | 2770 | INIT_LIST_HEAD(&mdsc->dentry_lru); |
| 2769 | 2771 | ||
| 2772 | ceph_caps_init(mdsc); | ||
| 2773 | ceph_adjust_min_caps(mdsc, client->min_caps); | ||
| 2774 | |||
| 2770 | return 0; | 2775 | return 0; |
| 2771 | } | 2776 | } |
| 2772 | 2777 | ||
| @@ -2962,6 +2967,7 @@ void ceph_mdsc_stop(struct ceph_mds_client *mdsc) | |||
| 2962 | if (mdsc->mdsmap) | 2967 | if (mdsc->mdsmap) |
| 2963 | ceph_mdsmap_destroy(mdsc->mdsmap); | 2968 | ceph_mdsmap_destroy(mdsc->mdsmap); |
| 2964 | kfree(mdsc->sessions); | 2969 | kfree(mdsc->sessions); |
| 2970 | ceph_caps_finalize(mdsc); | ||
| 2965 | } | 2971 | } |
| 2966 | 2972 | ||
| 2967 | 2973 | ||
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index e389902db131..8f2126321f2d 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
| @@ -151,6 +151,7 @@ typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc, | |||
| 151 | struct ceph_mds_request { | 151 | struct ceph_mds_request { |
| 152 | u64 r_tid; /* transaction id */ | 152 | u64 r_tid; /* transaction id */ |
| 153 | struct rb_node r_node; | 153 | struct rb_node r_node; |
| 154 | struct ceph_mds_client *r_mdsc; | ||
| 154 | 155 | ||
| 155 | int r_op; /* mds op code */ | 156 | int r_op; /* mds op code */ |
| 156 | int r_mds; | 157 | int r_mds; |
| @@ -267,6 +268,27 @@ struct ceph_mds_client { | |||
| 267 | spinlock_t cap_dirty_lock; /* protects above items */ | 268 | spinlock_t cap_dirty_lock; /* protects above items */ |
| 268 | wait_queue_head_t cap_flushing_wq; | 269 | wait_queue_head_t cap_flushing_wq; |
| 269 | 270 | ||
| 271 | /* | ||
| 272 | * Cap reservations | ||
| 273 | * | ||
| 274 | * Maintain a global pool of preallocated struct ceph_caps, referenced | ||
| 275 | * by struct ceph_caps_reservations. This ensures that we preallocate | ||
| 276 | * memory needed to successfully process an MDS response. (If an MDS | ||
| 277 | * sends us cap information and we fail to process it, we will have | ||
| 278 | * problems due to the client and MDS being out of sync.) | ||
| 279 | * | ||
| 280 | * Reservations are 'owned' by a ceph_cap_reservation context. | ||
| 281 | */ | ||
| 282 | spinlock_t caps_list_lock; | ||
| 283 | struct list_head caps_list; /* unused (reserved or | ||
| 284 | unreserved) */ | ||
| 285 | int caps_total_count; /* total caps allocated */ | ||
| 286 | int caps_use_count; /* in use */ | ||
| 287 | int caps_reserve_count; /* unused, reserved */ | ||
| 288 | int caps_avail_count; /* unused, unreserved */ | ||
| 289 | int caps_min_count; /* keep at least this many | ||
| 290 | (unreserved) */ | ||
| 291 | |||
| 270 | #ifdef CONFIG_DEBUG_FS | 292 | #ifdef CONFIG_DEBUG_FS |
| 271 | struct dentry *debugfs_file; | 293 | struct dentry *debugfs_file; |
| 272 | #endif | 294 | #endif |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index fa87f51e38e1..1a0bb4863a5d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
| @@ -630,7 +630,6 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) | |||
| 630 | 630 | ||
| 631 | /* caps */ | 631 | /* caps */ |
| 632 | client->min_caps = args->max_readdir; | 632 | client->min_caps = args->max_readdir; |
| 633 | ceph_adjust_min_caps(client->min_caps); | ||
| 634 | 633 | ||
| 635 | /* subsystems */ | 634 | /* subsystems */ |
| 636 | err = ceph_monc_init(&client->monc, client); | 635 | err = ceph_monc_init(&client->monc, client); |
| @@ -680,8 +679,6 @@ static void ceph_destroy_client(struct ceph_client *client) | |||
| 680 | 679 | ||
| 681 | ceph_monc_stop(&client->monc); | 680 | ceph_monc_stop(&client->monc); |
| 682 | 681 | ||
| 683 | ceph_adjust_min_caps(-client->min_caps); | ||
| 684 | |||
| 685 | ceph_debugfs_client_cleanup(client); | 682 | ceph_debugfs_client_cleanup(client); |
| 686 | destroy_workqueue(client->wb_wq); | 683 | destroy_workqueue(client->wb_wq); |
| 687 | destroy_workqueue(client->pg_inv_wq); | 684 | destroy_workqueue(client->pg_inv_wq); |
| @@ -1043,8 +1040,6 @@ static int __init init_ceph(void) | |||
| 1043 | if (ret) | 1040 | if (ret) |
| 1044 | goto out_msgr; | 1041 | goto out_msgr; |
| 1045 | 1042 | ||
| 1046 | ceph_caps_init(); | ||
| 1047 | |||
| 1048 | ret = register_filesystem(&ceph_fs_type); | 1043 | ret = register_filesystem(&ceph_fs_type); |
| 1049 | if (ret) | 1044 | if (ret) |
| 1050 | goto out_icache; | 1045 | goto out_icache; |
| @@ -1069,7 +1064,6 @@ static void __exit exit_ceph(void) | |||
| 1069 | { | 1064 | { |
| 1070 | dout("exit_ceph\n"); | 1065 | dout("exit_ceph\n"); |
| 1071 | unregister_filesystem(&ceph_fs_type); | 1066 | unregister_filesystem(&ceph_fs_type); |
| 1072 | ceph_caps_finalize(); | ||
| 1073 | destroy_caches(); | 1067 | destroy_caches(); |
| 1074 | ceph_msgr_exit(); | 1068 | ceph_msgr_exit(); |
| 1075 | ceph_debugfs_cleanup(); | 1069 | ceph_debugfs_cleanup(); |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 10a4a406e887..44d10cb0aeca 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
| @@ -560,11 +560,13 @@ static inline int __ceph_caps_wanted(struct ceph_inode_info *ci) | |||
| 560 | /* what the mds thinks we want */ | 560 | /* what the mds thinks we want */ |
| 561 | extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci); | 561 | extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci); |
| 562 | 562 | ||
| 563 | extern void ceph_caps_init(void); | 563 | extern void ceph_caps_init(struct ceph_mds_client *mdsc); |
| 564 | extern void ceph_caps_finalize(void); | 564 | extern void ceph_caps_finalize(struct ceph_mds_client *mdsc); |
| 565 | extern void ceph_adjust_min_caps(int delta); | 565 | extern void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta); |
| 566 | extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need); | 566 | extern int ceph_reserve_caps(struct ceph_mds_client *mdsc, |
| 567 | extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx); | 567 | struct ceph_cap_reservation *ctx, int need); |
| 568 | extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc, | ||
| 569 | struct ceph_cap_reservation *ctx); | ||
| 568 | extern void ceph_reservation_status(struct ceph_client *client, | 570 | extern void ceph_reservation_status(struct ceph_client *client, |
| 569 | int *total, int *avail, int *used, | 571 | int *total, int *avail, int *used, |
| 570 | int *reserved, int *min); | 572 | int *reserved, int *min); |
| @@ -806,7 +808,8 @@ static inline void ceph_remove_cap(struct ceph_cap *cap) | |||
| 806 | __ceph_remove_cap(cap); | 808 | __ceph_remove_cap(cap); |
| 807 | spin_unlock(&inode->i_lock); | 809 | spin_unlock(&inode->i_lock); |
| 808 | } | 810 | } |
| 809 | extern void ceph_put_cap(struct ceph_cap *cap); | 811 | extern void ceph_put_cap(struct ceph_mds_client *mdsc, |
| 812 | struct ceph_cap *cap); | ||
| 810 | 813 | ||
| 811 | extern void ceph_queue_caps_release(struct inode *inode); | 814 | extern void ceph_queue_caps_release(struct inode *inode); |
| 812 | extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); | 815 | extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); |
