author	Alex Elder <elder@dreamhost.com>	2012-01-29 14:57:44 -0500
committer	Alex Elder <elder@dreamhost.com>	2012-03-22 11:47:47 -0400
commit	d184f6bfde1428ad4a690d49b28afc9ab4d57b35 (patch)
tree	fd3156df944edc90146143e5cea416d34c8fcd47 /drivers/block
parent	499afd5b8e742792fda6bd7730c738ad83aecf6b (diff)
rbd: restore previous rbd id sequence behavior
It used to be that selecting a new unique identifier for an added
rbd device required searching all existing ones to find the highest
id in use. A recent change made that search unnecessary, but made
the ids that were handed out monotonically non-decreasing. It's a
bit more pleasant to have smaller rbd ids though, and this change
makes ids get allocated as they were before--each new id is one more
than the maximum currently in use.
Signed-off-by: Alex Elder <elder@dreamhost.com>
Signed-off-by: Sage Weil <sage@newdream.net>
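
For context, the allocation side this put path pairs with, rbd_id_get(), is not touched by the patch. A minimal sketch of it, assuming the scheme described above (atomic64_inc_return() on rbd_id_max plus registration on rbd_dev_list), looks roughly like this; the exact body in the tree may differ:

	/*
	 * Sketch only (not part of this patch): hand out a new id that is
	 * one more than the current maximum, and register the device on
	 * rbd_dev_list so rbd_id_put() can rescan the list later.
	 */
	static void rbd_id_get(struct rbd_device *rbd_dev)
	{
		rbd_dev->id = atomic64_inc_return(&rbd_id_max);

		spin_lock(&rbd_dev_list_lock);
		list_add_tail(&rbd_dev->node, &rbd_dev_list);
		spin_unlock(&rbd_dev_list_lock);
	}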
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/rbd.c	40	++++++++++++++++++++++++++++++++++------
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e7727e8337fc..9ac1484a95ad 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2172,18 +2172,46 @@ static void rbd_id_get(struct rbd_device *rbd_dev)
  */
 static void rbd_id_put(struct rbd_device *rbd_dev)
 {
-	BUG_ON(rbd_dev->id < 1);
+	struct list_head *tmp;
+	int rbd_id = rbd_dev->id;
+	int max_id;
+
+	BUG_ON(rbd_id < 1);
 
 	spin_lock(&rbd_dev_list_lock);
 	list_del_init(&rbd_dev->node);
+
+	/*
+	 * If the id being "put" is not the current maximum, there
+	 * is nothing special we need to do.
+	 */
+	if (rbd_id != atomic64_read(&rbd_id_max)) {
+		spin_unlock(&rbd_dev_list_lock);
+		return;
+	}
+
+	/*
+	 * We need to update the current maximum id. Search the
+	 * list to find out what it is. We're more likely to find
+	 * the maximum at the end, so search the list backward.
+	 */
+	max_id = 0;
+	list_for_each_prev(tmp, &rbd_dev_list) {
+		struct rbd_device *rbd_dev;
+
+		rbd_dev = list_entry(tmp, struct rbd_device, node);
+		if (rbd_id > max_id)
+			max_id = rbd_id;
+	}
 	spin_unlock(&rbd_dev_list_lock);
 
 	/*
-	 * New id's are always one more than the current maximum.
-	 * If the id being "put" *is* that maximum, decrement the
-	 * maximum so the next one requested just reuses this one.
+	 * The max id could have been updated by rbd_id_get(), in
+	 * which case it now accurately reflects the new maximum.
+	 * Be careful not to overwrite the maximum value in that
+	 * case.
 	 */
-	atomic64_cmpxchg(&rbd_id_max, rbd_dev->id, rbd_dev->id - 1);
+	atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id);
 }
 
 static ssize_t rbd_add(struct bus_type *bus,
@@ -2220,7 +2248,7 @@ static ssize_t rbd_add(struct bus_type *bus,
 
 	init_rwsem(&rbd_dev->header.snap_rwsem);
 
-	/* generate unique id: one more than highest used so far */
+	/* generate unique id: find highest unique id, add one */
 	rbd_id_get(rbd_dev);
 
 	/* parse add command */
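
The atomic64_cmpxchg() at the end of rbd_id_put() only lowers rbd_id_max if the id being put is still the maximum; if rbd_id_get() raced in after the lock was dropped and raised the maximum, the exchange fails and the newer value is preserved. A toy userspace sketch of that same compare-and-exchange pattern, with C11 atomics standing in for the kernel's atomic64_* helpers (all names here are illustrative, not from the driver):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic long id_max;		/* plays the role of rbd_id_max */

	/* Allocate a new id: one more than the current maximum. */
	static long id_get(void)
	{
		return atomic_fetch_add(&id_max, 1) + 1;
	}

	/*
	 * Release an id: lower the maximum to new_max only if the id being
	 * put is still the maximum; otherwise leave the newer maximum alone.
	 */
	static void id_put(long id, long new_max)
	{
		long expected = id;

		atomic_compare_exchange_strong(&id_max, &expected, new_max);
	}

	int main(void)
	{
		long a = id_get();			/* 1 */
		long b = id_get();			/* 2 */

		id_put(b, a);				/* 2 was the max; it drops back to 1 */
		printf("reused id: %ld\n", id_get());	/* prints 2 again */

		id_put(a, 0);				/* 1 is not the max; exchange fails */
		printf("max now: %ld\n", (long) atomic_load(&id_max));	/* prints 2 */
		return 0;
	}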