author     Andreas Gruenbacher <agruen@linbit.com>  2011-07-03 07:26:43 -0400
committer  Philipp Reisner <philipp.reisner@linbit.com>  2014-02-17 10:42:24 -0500
commit     b30ab7913b0a7b1d3b1091c8cb3abb1a9f1e0824 (patch)
tree       308a13ddc14374d86a2874d740aa5bc17a64b54f /drivers/block/drbd/drbd_actlog.c
parent     547616979372b65646d691e8dab90e850be582fe (diff)

drbd: Rename "mdev" to "device"

sed -i -e 's:mdev:device:g'

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
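The rename is purely mechanical, which is why every hunk below is a one-for-one substitution. A sketch of how such a tree-wide rename can be applied and re-checked (the file glob and the verification step are assumptions; the commit message records only the sed expression itself):

	# Apply the mechanical rename across the DRBD sources
	# (the glob is an assumption; the commit records only the sed expression).
	sed -i -e 's:mdev:device:g' drivers/block/drbd/*.[ch]

	# Confirm no stray "mdev" identifiers survived the rewrite
	# (git grep exits nonzero when nothing matches).
	git grep -nw 'mdev' -- drivers/block/drbd || echo "rename complete"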
Diffstat (limited to 'drivers/block/drbd/drbd_actlog.c')
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	544
1 file changed, 272 insertions(+), 272 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 16041f8e2a60..b33836d72f3c 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -105,24 +105,24 @@ struct update_al_work {
 };
 
 
-void *drbd_md_get_buffer(struct drbd_device *mdev)
+void *drbd_md_get_buffer(struct drbd_device *device)
 {
 	int r;
 
-	wait_event(mdev->misc_wait,
-		   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
-		   mdev->state.disk <= D_FAILED);
+	wait_event(device->misc_wait,
+		   (r = atomic_cmpxchg(&device->md_io_in_use, 0, 1)) == 0 ||
+		   device->state.disk <= D_FAILED);
 
-	return r ? NULL : page_address(mdev->md_io_page);
+	return r ? NULL : page_address(device->md_io_page);
 }
 
-void drbd_md_put_buffer(struct drbd_device *mdev)
+void drbd_md_put_buffer(struct drbd_device *device)
 {
-	if (atomic_dec_and_test(&mdev->md_io_in_use))
-		wake_up(&mdev->misc_wait);
+	if (atomic_dec_and_test(&device->md_io_in_use))
+		wake_up(&device->misc_wait);
 }
 
-void wait_until_done_or_force_detached(struct drbd_device *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
 				       unsigned int *done)
 {
 	long dt;
@@ -134,15 +134,15 @@ void wait_until_done_or_force_detached(struct drbd_device *mdev, struct drbd_bac
 	if (dt == 0)
 		dt = MAX_SCHEDULE_TIMEOUT;
 
-	dt = wait_event_timeout(mdev->misc_wait,
-			*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+	dt = wait_event_timeout(device->misc_wait,
+			*done || test_bit(FORCE_DETACH, &device->flags), dt);
 	if (dt == 0) {
 		dev_err(DEV, "meta-data IO operation timed out\n");
-		drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+		drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
 	}
 }
 
-static int _drbd_md_sync_page_io(struct drbd_device *mdev,
+static int _drbd_md_sync_page_io(struct drbd_device *device,
 				 struct drbd_backing_dev *bdev,
 				 struct page *page, sector_t sector,
 				 int rw, int size)
@@ -150,10 +150,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *mdev,
 	struct bio *bio;
 	int err;
 
-	mdev->md_io.done = 0;
-	mdev->md_io.error = -ENODEV;
+	device->md_io.done = 0;
+	device->md_io.error = -ENODEV;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
 		rw |= REQ_FUA | REQ_FLUSH;
 	rw |= REQ_SYNC;
 
@@ -163,14 +163,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *mdev,
 	err = -EIO;
 	if (bio_add_page(bio, page, size, 0) != size)
 		goto out;
-	bio->bi_private = &mdev->md_io;
+	bio->bi_private = &device->md_io;
 	bio->bi_end_io = drbd_md_io_complete;
 	bio->bi_rw = rw;
 
-	if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
+	if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
 		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
 		;
-	else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
+	else if (!get_ldev_if_state(device, D_ATTACHING)) {
 		/* Corresponding put_ldev in drbd_md_io_complete() */
 		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
 		err = -ENODEV;
@@ -178,27 +178,27 @@ static int _drbd_md_sync_page_io(struct drbd_device *mdev,
 	}
 
 	bio_get(bio); /* one bio_put() is in the completion handler */
-	atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
-	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+	atomic_inc(&device->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
+	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
 		bio_endio(bio, -EIO);
 	else
 		submit_bio(rw, bio);
-	wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
+	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
 	if (bio_flagged(bio, BIO_UPTODATE))
-		err = mdev->md_io.error;
+		err = device->md_io.error;
 
  out:
 	bio_put(bio);
 	return err;
 }
 
-int drbd_md_sync_page_io(struct drbd_device *mdev, struct drbd_backing_dev *bdev,
+int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
 			 sector_t sector, int rw)
 {
 	int err;
-	struct page *iop = mdev->md_io_page;
+	struct page *iop = device->md_io_page;
 
-	D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
+	D_ASSERT(atomic_read(&device->md_io_in_use) == 1);
 
 	BUG_ON(!bdev->md_bdev);
 
@@ -214,7 +214,7 @@ int drbd_md_sync_page_io(struct drbd_device *mdev, struct drbd_backing_dev *bdev
 		 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
 
 	/* we do all our meta data IO in aligned 4k blocks. */
-	err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
+	err = _drbd_md_sync_page_io(device, bdev, iop, sector, rw, 4096);
 	if (err) {
 		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
 			(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
@@ -222,10 +222,10 @@ int drbd_md_sync_page_io(struct drbd_device *mdev, struct drbd_backing_dev *bdev
 	return err;
 }
 
-static struct bm_extent *find_active_resync_extent(struct drbd_device *mdev, unsigned int enr)
+static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
 {
 	struct lc_element *tmp;
-	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+	tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(tmp != NULL)) {
 		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
 		if (test_bit(BME_NO_WRITES, &bm_ext->flags))
@@ -234,30 +234,30 @@ static struct bm_extent *find_active_resync_extent(struct drbd_device *mdev, uns
 	return NULL;
 }
 
-static struct lc_element *_al_get(struct drbd_device *mdev, unsigned int enr, bool nonblock)
+static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
 {
 	struct lc_element *al_ext;
 	struct bm_extent *bm_ext;
 	int wake;
 
-	spin_lock_irq(&mdev->al_lock);
-	bm_ext = find_active_resync_extent(mdev, enr);
+	spin_lock_irq(&device->al_lock);
+	bm_ext = find_active_resync_extent(device, enr);
 	if (bm_ext) {
 		wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
-		spin_unlock_irq(&mdev->al_lock);
+		spin_unlock_irq(&device->al_lock);
 		if (wake)
-			wake_up(&mdev->al_wait);
+			wake_up(&device->al_wait);
 		return NULL;
 	}
 	if (nonblock)
-		al_ext = lc_try_get(mdev->act_log, enr);
+		al_ext = lc_try_get(device->act_log, enr);
 	else
-		al_ext = lc_get(mdev->act_log, enr);
-	spin_unlock_irq(&mdev->al_lock);
+		al_ext = lc_get(device->act_log, enr);
+	spin_unlock_irq(&device->al_lock);
 	return al_ext;
 }
 
-bool drbd_al_begin_io_fastpath(struct drbd_device *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
 {
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
@@ -265,17 +265,17 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *mdev, struct drbd_interval *i
 	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 
 	D_ASSERT((unsigned)(last - first) <= 1);
-	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+	D_ASSERT(atomic_read(&device->local_cnt) > 0);
 
 	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
 	if (first != last)
 		return false;
 
-	return _al_get(mdev, first, true);
+	return _al_get(device, first, true);
 }
 
 static
-bool drbd_al_begin_io_prepare(struct drbd_device *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
 {
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
@@ -285,19 +285,19 @@ bool drbd_al_begin_io_prepare(struct drbd_device *mdev, struct drbd_interval *i)
 	bool need_transaction = false;
 
 	D_ASSERT(first <= last);
-	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+	D_ASSERT(atomic_read(&device->local_cnt) > 0);
 
 	for (enr = first; enr <= last; enr++) {
 		struct lc_element *al_ext;
-		wait_event(mdev->al_wait,
-				(al_ext = _al_get(mdev, enr, false)) != NULL);
+		wait_event(device->al_wait,
+				(al_ext = _al_get(device, enr, false)) != NULL);
 		if (al_ext->lc_number != enr)
 			need_transaction = true;
 	}
 	return need_transaction;
 }
 
-static int al_write_transaction(struct drbd_device *mdev, bool delegate);
+static int al_write_transaction(struct drbd_device *device, bool delegate);
 
 /* When called through generic_make_request(), we must delegate
  * activity log I/O to the worker thread: a further request
@@ -311,58 +311,58 @@ static int al_write_transaction(struct drbd_device *mdev, bool delegate);
 /*
  * @delegate:   delegate activity log I/O to the worker thread
  */
-void drbd_al_begin_io_commit(struct drbd_device *mdev, bool delegate)
+void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
 {
 	bool locked = false;
 
-	BUG_ON(delegate && current == mdev->tconn->worker.task);
+	BUG_ON(delegate && current == device->tconn->worker.task);
 
 	/* Serialize multiple transactions.
 	 * This uses test_and_set_bit, memory barrier is implicit.
 	 */
-	wait_event(mdev->al_wait,
-			mdev->act_log->pending_changes == 0 ||
-			(locked = lc_try_lock_for_transaction(mdev->act_log)));
+	wait_event(device->al_wait,
+			device->act_log->pending_changes == 0 ||
+			(locked = lc_try_lock_for_transaction(device->act_log)));
 
 	if (locked) {
 		/* Double check: it may have been committed by someone else,
 		 * while we have been waiting for the lock. */
-		if (mdev->act_log->pending_changes) {
+		if (device->act_log->pending_changes) {
 			bool write_al_updates;
 
 			rcu_read_lock();
-			write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+			write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
 			rcu_read_unlock();
 
 			if (write_al_updates)
-				al_write_transaction(mdev, delegate);
-			spin_lock_irq(&mdev->al_lock);
+				al_write_transaction(device, delegate);
+			spin_lock_irq(&device->al_lock);
 			/* FIXME
 			if (err)
 				we need an "lc_cancel" here;
 			*/
-			lc_committed(mdev->act_log);
-			spin_unlock_irq(&mdev->al_lock);
+			lc_committed(device->act_log);
+			spin_unlock_irq(&device->al_lock);
 		}
-		lc_unlock(mdev->act_log);
-		wake_up(&mdev->al_wait);
+		lc_unlock(device->act_log);
+		wake_up(&device->al_wait);
 	}
 }
 
 /*
  * @delegate:   delegate activity log I/O to the worker thread
  */
-void drbd_al_begin_io(struct drbd_device *mdev, struct drbd_interval *i, bool delegate)
+void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
 {
-	BUG_ON(delegate && current == mdev->tconn->worker.task);
+	BUG_ON(delegate && current == device->tconn->worker.task);
 
-	if (drbd_al_begin_io_prepare(mdev, i))
-		drbd_al_begin_io_commit(mdev, delegate);
+	if (drbd_al_begin_io_prepare(device, i))
+		drbd_al_begin_io_commit(device, delegate);
 }
 
-int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i)
+int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
 {
-	struct lru_cache *al = mdev->act_log;
+	struct lru_cache *al = device->act_log;
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
@@ -386,7 +386,7 @@ int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i)
 	/* Is resync active in this area? */
 	for (enr = first; enr <= last; enr++) {
 		struct lc_element *tmp;
-		tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+		tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
 		if (unlikely(tmp != NULL)) {
 			struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
 			if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
@@ -402,14 +402,14 @@ int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i)
 	 * this has to be successful. */
 	for (enr = first; enr <= last; enr++) {
 		struct lc_element *al_ext;
-		al_ext = lc_get_cumulative(mdev->act_log, enr);
+		al_ext = lc_get_cumulative(device->act_log, enr);
 		if (!al_ext)
 			dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
 	}
 	return 0;
 }
 
-void drbd_al_complete_io(struct drbd_device *mdev, struct drbd_interval *i)
+void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
 {
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
@@ -420,18 +420,18 @@ void drbd_al_complete_io(struct drbd_device *mdev, struct drbd_interval *i)
 	unsigned long flags;
 
 	D_ASSERT(first <= last);
-	spin_lock_irqsave(&mdev->al_lock, flags);
+	spin_lock_irqsave(&device->al_lock, flags);
 
 	for (enr = first; enr <= last; enr++) {
-		extent = lc_find(mdev->act_log, enr);
+		extent = lc_find(device->act_log, enr);
 		if (!extent) {
 			dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
 			continue;
 		}
-		lc_put(mdev->act_log, extent);
+		lc_put(device->act_log, extent);
 	}
-	spin_unlock_irqrestore(&mdev->al_lock, flags);
-	wake_up(&mdev->al_wait);
+	spin_unlock_irqrestore(&device->al_lock, flags);
+	wake_up(&device->al_wait);
 }
 
 #if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
@@ -461,13 +461,13 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
 		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
 }
 
-static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *mdev)
+static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
 {
-	const unsigned int stripes = mdev->ldev->md.al_stripes;
-	const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
+	const unsigned int stripes = device->ldev->md.al_stripes;
+	const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;
 
 	/* transaction number, modulo on-disk ring buffer wrap around */
-	unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);
+	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
 
 	/* ... to aligned 4k on disk block */
 	t = ((t % stripes) * stripe_size_4kB) + t/stripes;
@@ -476,11 +476,11 @@ static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *mdev)
 	t *= 8;
 
 	/* ... plus offset to the on disk position */
-	return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
+	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
 }
 
 static int
-_al_write_transaction(struct drbd_device *mdev)
+_al_write_transaction(struct drbd_device *device)
 {
 	struct al_transaction_on_disk *buffer;
 	struct lc_element *e;
@@ -490,31 +490,31 @@ _al_write_transaction(struct drbd_device *mdev)
 	unsigned crc = 0;
 	int err = 0;
 
-	if (!get_ldev(mdev)) {
+	if (!get_ldev(device)) {
 		dev_err(DEV, "disk is %s, cannot start al transaction\n",
-			drbd_disk_str(mdev->state.disk));
+			drbd_disk_str(device->state.disk));
 		return -EIO;
 	}
 
 	/* The bitmap write may have failed, causing a state change. */
-	if (mdev->state.disk < D_INCONSISTENT) {
+	if (device->state.disk < D_INCONSISTENT) {
 		dev_err(DEV,
 			"disk is %s, cannot write al transaction\n",
-			drbd_disk_str(mdev->state.disk));
-		put_ldev(mdev);
+			drbd_disk_str(device->state.disk));
+		put_ldev(device);
 		return -EIO;
 	}
 
-	buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+	buffer = drbd_md_get_buffer(device); /* protects md_io_buffer, al_tr_cycle, ... */
 	if (!buffer) {
 		dev_err(DEV, "disk failed while waiting for md_io buffer\n");
-		put_ldev(mdev);
+		put_ldev(device);
 		return -ENODEV;
 	}
 
 	memset(buffer, 0, sizeof(*buffer));
 	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
-	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
+	buffer->tr_number = cpu_to_be32(device->al_tr_number);
 
 	i = 0;
 
@@ -522,8 +522,8 @@ _al_write_transaction(struct drbd_device *mdev)
 	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
 	 * lc_try_lock_for_transaction() --, someone may still
 	 * be in the process of changing it. */
-	spin_lock_irq(&mdev->al_lock);
-	list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
+	spin_lock_irq(&device->al_lock);
+	list_for_each_entry(e, &device->act_log->to_be_changed, list) {
 		if (i == AL_UPDATES_PER_TRANSACTION) {
 			i++;
 			break;
@@ -531,11 +531,11 @@ _al_write_transaction(struct drbd_device *mdev)
 		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
 		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
 		if (e->lc_number != LC_FREE)
-			drbd_bm_mark_for_writeout(mdev,
+			drbd_bm_mark_for_writeout(device,
 					al_extent_to_bm_page(e->lc_number));
 		i++;
 	}
-	spin_unlock_irq(&mdev->al_lock);
+	spin_unlock_irq(&device->al_lock);
 	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
 
 	buffer->n_updates = cpu_to_be16(i);
@@ -544,48 +544,48 @@ _al_write_transaction(struct drbd_device *mdev)
 		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
 	}
 
-	buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
-	buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);
+	buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
+	buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);
 
 	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
-		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
+		   device->act_log->nr_elements - device->al_tr_cycle);
 	for (i = 0; i < mx; i++) {
-		unsigned idx = mdev->al_tr_cycle + i;
-		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
+		unsigned idx = device->al_tr_cycle + i;
+		extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
 		buffer->context[i] = cpu_to_be32(extent_nr);
 	}
 	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
 		buffer->context[i] = cpu_to_be32(LC_FREE);
 
-	mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
-	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
-		mdev->al_tr_cycle = 0;
+	device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
+	if (device->al_tr_cycle >= device->act_log->nr_elements)
+		device->al_tr_cycle = 0;
 
-	sector = al_tr_number_to_on_disk_sector(mdev);
+	sector = al_tr_number_to_on_disk_sector(device);
 
 	crc = crc32c(0, buffer, 4096);
 	buffer->crc32c = cpu_to_be32(crc);
 
-	if (drbd_bm_write_hinted(mdev))
+	if (drbd_bm_write_hinted(device))
 		err = -EIO;
 	else {
 		bool write_al_updates;
 		rcu_read_lock();
-		write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+		write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
 		rcu_read_unlock();
 		if (write_al_updates) {
-			if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+			if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
 				err = -EIO;
-				drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+				drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
 			} else {
-				mdev->al_tr_number++;
-				mdev->al_writ_cnt++;
+				device->al_tr_number++;
+				device->al_writ_cnt++;
 			}
 		}
 	}
 
-	drbd_md_put_buffer(mdev);
-	put_ldev(mdev);
+	drbd_md_put_buffer(device);
+	put_ldev(device);
 
 	return err;
 }
@@ -594,10 +594,10 @@ _al_write_transaction(struct drbd_device *mdev)
 static int w_al_write_transaction(struct drbd_work *w, int unused)
 {
 	struct update_al_work *aw = container_of(w, struct update_al_work, w);
-	struct drbd_device *mdev = w->mdev;
+	struct drbd_device *device = w->device;
 	int err;
 
-	err = _al_write_transaction(mdev);
+	err = _al_write_transaction(device);
 	aw->err = err;
 	complete(&aw->event);
 
@@ -607,63 +607,63 @@ static int w_al_write_transaction(struct drbd_work *w, int unused)
 /* Calls from worker context (see w_restart_disk_io()) need to write the
    transaction directly. Others came through generic_make_request(),
    those need to delegate it to the worker. */
-static int al_write_transaction(struct drbd_device *mdev, bool delegate)
+static int al_write_transaction(struct drbd_device *device, bool delegate)
 {
 	if (delegate) {
 		struct update_al_work al_work;
 		init_completion(&al_work.event);
 		al_work.w.cb = w_al_write_transaction;
-		al_work.w.mdev = mdev;
-		drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+		al_work.w.device = device;
+		drbd_queue_work_front(&device->tconn->sender_work, &al_work.w);
 		wait_for_completion(&al_work.event);
 		return al_work.err;
 	} else
-		return _al_write_transaction(mdev);
+		return _al_write_transaction(device);
 }
 
-static int _try_lc_del(struct drbd_device *mdev, struct lc_element *al_ext)
+static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
 {
 	int rv;
 
-	spin_lock_irq(&mdev->al_lock);
+	spin_lock_irq(&device->al_lock);
 	rv = (al_ext->refcnt == 0);
 	if (likely(rv))
-		lc_del(mdev->act_log, al_ext);
-	spin_unlock_irq(&mdev->al_lock);
+		lc_del(device->act_log, al_ext);
+	spin_unlock_irq(&device->al_lock);
 
 	return rv;
 }
 
 /**
  * drbd_al_shrink() - Removes all active extents form the activity log
- * @mdev:	DRBD device.
+ * @device:	DRBD device.
  *
  * Removes all active extents form the activity log, waiting until
 
 * the reference count of each entry dropped to 0 first, of course.
 *
- * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
+ * You need to lock device->act_log with lc_try_lock() / lc_unlock()
  */
-void drbd_al_shrink(struct drbd_device *mdev)
+void drbd_al_shrink(struct drbd_device *device)
 {
 	struct lc_element *al_ext;
 	int i;
 
-	D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
+	D_ASSERT(test_bit(__LC_LOCKED, &device->act_log->flags));
 
-	for (i = 0; i < mdev->act_log->nr_elements; i++) {
-		al_ext = lc_element_by_index(mdev->act_log, i);
+	for (i = 0; i < device->act_log->nr_elements; i++) {
+		al_ext = lc_element_by_index(device->act_log, i);
 		if (al_ext->lc_number == LC_FREE)
 			continue;
-		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
+		wait_event(device->al_wait, _try_lc_del(device, al_ext));
 	}
 
-	wake_up(&mdev->al_wait);
+	wake_up(&device->al_wait);
 }
 
-int drbd_initialize_al(struct drbd_device *mdev, void *buffer)
+int drbd_initialize_al(struct drbd_device *device, void *buffer)
 {
 	struct al_transaction_on_disk *al = buffer;
-	struct drbd_md *md = &mdev->ldev->md;
+	struct drbd_md *md = &device->ldev->md;
 	sector_t al_base = md->md_offset + md->al_offset;
 	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
 	int i;
@@ -674,7 +674,7 @@ int drbd_initialize_al(struct drbd_device *mdev, void *buffer)
 	al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
 
 	for (i = 0; i < al_size_4k; i++) {
-		int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
+		int err = drbd_md_sync_page_io(device, device->ldev, al_base + i * 8, WRITE);
 		if (err)
 			return err;
 	}
@@ -684,32 +684,32 @@ int drbd_initialize_al(struct drbd_device *mdev, void *buffer)
 static int w_update_odbm(struct drbd_work *w, int unused)
 {
 	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
-	struct drbd_device *mdev = w->mdev;
+	struct drbd_device *device = w->device;
 	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
 
-	if (!get_ldev(mdev)) {
+	if (!get_ldev(device)) {
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
 		kfree(udw);
 		return 0;
 	}
 
-	drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
-	put_ldev(mdev);
+	drbd_bm_write_page(device, rs_extent_to_bm_page(udw->enr));
+	put_ldev(device);
 
 	kfree(udw);
 
-	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
-		switch (mdev->state.conn) {
+	if (drbd_bm_total_weight(device) <= device->rs_failed) {
+		switch (device->state.conn) {
 		case C_SYNC_SOURCE: case C_SYNC_TARGET:
 		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
-			drbd_resync_finished(mdev);
+			drbd_resync_finished(device);
 		default:
 			/* nothing to do */
 			break;
 		}
 	}
-	drbd_bcast_event(mdev, &sib);
+	drbd_bcast_event(device, &sib);
 
 	return 0;
 }
@@ -721,7 +721,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
-static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
+static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t sector,
 				       int count, int success)
 {
 	struct lc_element *e;
@@ -729,13 +729,13 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
 
 	unsigned int enr;
 
-	D_ASSERT(atomic_read(&mdev->local_cnt));
+	D_ASSERT(atomic_read(&device->local_cnt));
 
 	/* I simply assume that a sector/size pair never crosses
 	 * a 16 MB extent border. (Currently this is true...) */
 	enr = BM_SECT_TO_EXT(sector);
 
-	e = lc_get(mdev->resync, enr);
+	e = lc_get(device->resync, enr);
 	if (e) {
 		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
 		if (ext->lce.lc_number == enr) {
@@ -749,7 +749,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
 				     (unsigned long long)sector,
 				     ext->lce.lc_number, ext->rs_left,
 				     ext->rs_failed, count,
-				     drbd_conn_str(mdev->state.conn));
+				     drbd_conn_str(device->state.conn));
 
 				/* We don't expect to be able to clear more bits
 				 * than have been set when we originally counted
@@ -757,7 +757,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
 				 * Whatever the reason (disconnect during resync,
 				 * delayed local completion of an application write),
 				 * try to fix it up by recounting here. */
-				ext->rs_left = drbd_bm_e_weight(mdev, enr);
+				ext->rs_left = drbd_bm_e_weight(device, enr);
 			}
 		} else {
 			/* Normally this element should be in the cache,
@@ -766,7 +766,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
 			 * But maybe an application write finished, and we set
 			 * something outside the resync lru_cache in sync.
 			 */
-			int rs_left = drbd_bm_e_weight(mdev, enr);
+			int rs_left = drbd_bm_e_weight(device, enr);
 			if (ext->flags != 0) {
 				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
 				     " -> %d[%u;00]\n",
@@ -783,9 +783,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
 			ext->rs_failed = success ? 0 : count;
 			/* we don't keep a persistent log of the resync lru,
 			 * we can commit any change right away. */
-			lc_committed(mdev->resync);
+			lc_committed(device->resync);
 		}
-		lc_put(mdev->resync, &ext->lce);
+		lc_put(device->resync, &ext->lce);
 		/* no race, we are within the al_lock! */
 
 		if (ext->rs_left == ext->rs_failed) {
@@ -795,32 +795,32 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
 			if (udw) {
 				udw->enr = ext->lce.lc_number;
 				udw->w.cb = w_update_odbm;
-				udw->w.mdev = mdev;
-				drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
+				udw->w.device = device;
+				drbd_queue_work_front(&device->tconn->sender_work, &udw->w);
 			} else {
 				dev_warn(DEV, "Could not kmalloc an udw\n");
 			}
 		}
 	} else {
 		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
-			mdev->resync_locked,
-			mdev->resync->nr_elements,
-			mdev->resync->flags);
+			device->resync_locked,
+			device->resync->nr_elements,
+			device->resync->flags);
 	}
 }
 
-void drbd_advance_rs_marks(struct drbd_device *mdev, unsigned long still_to_go)
+void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
 {
 	unsigned long now = jiffies;
-	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
-	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+	unsigned long last = device->rs_mark_time[device->rs_last_mark];
+	int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
 	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
-		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
-		    mdev->state.conn != C_PAUSED_SYNC_T &&
-		    mdev->state.conn != C_PAUSED_SYNC_S) {
-			mdev->rs_mark_time[next] = now;
-			mdev->rs_mark_left[next] = still_to_go;
-			mdev->rs_last_mark = next;
+		if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
+		    device->state.conn != C_PAUSED_SYNC_T &&
+		    device->state.conn != C_PAUSED_SYNC_S) {
+			device->rs_mark_time[next] = now;
+			device->rs_mark_left[next] = still_to_go;
+			device->rs_last_mark = next;
 		}
 	}
 }
@@ -832,7 +832,7 @@ void drbd_advance_rs_marks(struct drbd_device *mdev, unsigned long still_to_go)
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
-void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector, int size,
+void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size,
 			const char *file, const unsigned int line)
 {
 	/* Is called from worker and receiver context _only_ */
@@ -848,10 +848,10 @@ void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector, int size,
 		return;
 	}
 
-	if (!get_ldev(mdev))
+	if (!get_ldev(device))
 		return; /* no disk, no metadata, no bitmap to clear bits in */
 
-	nr_sectors = drbd_get_capacity(mdev->this_bdev);
+	nr_sectors = drbd_get_capacity(device->this_bdev);
 	esector = sector + (size >> 9) - 1;
 
 	if (!expect(sector < nr_sectors))
@@ -879,21 +879,21 @@ void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector, int size,
 	 * ok, (capacity & 7) != 0 sometimes, but who cares...
 	 * we count rs_{total,left} in bits, not sectors.
 	 */
-	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
+	count = drbd_bm_clear_bits(device, sbnr, ebnr);
 	if (count) {
-		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
-		spin_lock_irqsave(&mdev->al_lock, flags);
-		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
-		spin_unlock_irqrestore(&mdev->al_lock, flags);
+		drbd_advance_rs_marks(device, drbd_bm_total_weight(device));
+		spin_lock_irqsave(&device->al_lock, flags);
+		drbd_try_clear_on_disk_bm(device, sector, count, true);
+		spin_unlock_irqrestore(&device->al_lock, flags);
 
 		/* just wake_up unconditional now, various lc_chaged(),
 		 * lc_put() in drbd_try_clear_on_disk_bm(). */
 		wake_up = 1;
 	}
 out:
-	put_ldev(mdev);
+	put_ldev(device);
 	if (wake_up)
-		wake_up(&mdev->al_wait);
+		wake_up(&device->al_wait);
 }
 
 /*
@@ -904,7 +904,7 @@ out:
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
-int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size,
 			   const char *file, const unsigned int line)
 {
 	unsigned long sbnr, ebnr, flags;
@@ -922,10 +922,10 @@ int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector, int size,
 		return 0;
 	}
 
-	if (!get_ldev(mdev))
+	if (!get_ldev(device))
 		return 0; /* no disk, no metadata, no bitmap to set bits in */
 
-	nr_sectors = drbd_get_capacity(mdev->this_bdev);
+	nr_sectors = drbd_get_capacity(device->this_bdev);
 	esector = sector + (size >> 9) - 1;
 
 	if (!expect(sector < nr_sectors))
@@ -940,51 +940,51 @@ int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector, int size,
 
 	/* ok, (capacity & 7) != 0 sometimes, but who cares...
 	 * we count rs_{total,left} in bits, not sectors. */
-	spin_lock_irqsave(&mdev->al_lock, flags);
-	count = drbd_bm_set_bits(mdev, sbnr, ebnr);
+	spin_lock_irqsave(&device->al_lock, flags);
+	count = drbd_bm_set_bits(device, sbnr, ebnr);
 
 	enr = BM_SECT_TO_EXT(sector);
-	e = lc_find(mdev->resync, enr);
+	e = lc_find(device->resync, enr);
 	if (e)
 		lc_entry(e, struct bm_extent, lce)->rs_left += count;
-	spin_unlock_irqrestore(&mdev->al_lock, flags);
+	spin_unlock_irqrestore(&device->al_lock, flags);
 
 out:
-	put_ldev(mdev);
+	put_ldev(device);
 
 	return count;
 }
 
 static
-struct bm_extent *_bme_get(struct drbd_device *mdev, unsigned int enr)
+struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
 {
 	struct lc_element *e;
 	struct bm_extent *bm_ext;
 	int wakeup = 0;
 	unsigned long rs_flags;
 
-	spin_lock_irq(&mdev->al_lock);
-	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
-		spin_unlock_irq(&mdev->al_lock);
+	spin_lock_irq(&device->al_lock);
+	if (device->resync_locked > device->resync->nr_elements/2) {
+		spin_unlock_irq(&device->al_lock);
 		return NULL;
 	}
-	e = lc_get(mdev->resync, enr);
+	e = lc_get(device->resync, enr);
 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 	if (bm_ext) {
 		if (bm_ext->lce.lc_number != enr) {
-			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
 			bm_ext->rs_failed = 0;
-			lc_committed(mdev->resync);
+			lc_committed(device->resync);
 			wakeup = 1;
 		}
 		if (bm_ext->lce.refcnt == 1)
-			mdev->resync_locked++;
+			device->resync_locked++;
 		set_bit(BME_NO_WRITES, &bm_ext->flags);
 	}
-	rs_flags = mdev->resync->flags;
-	spin_unlock_irq(&mdev->al_lock);
+	rs_flags = device->resync->flags;
+	spin_unlock_irq(&device->al_lock);
 	if (wakeup)
-		wake_up(&mdev->al_wait);
+		wake_up(&device->al_wait);
 
 	if (!bm_ext) {
 		if (rs_flags & LC_STARVING)
@@ -996,25 +996,25 @@ struct bm_extent *_bme_get(struct drbd_device *mdev, unsigned int enr)
 	return bm_ext;
 }
 
-static int _is_in_al(struct drbd_device *mdev, unsigned int enr)
+static int _is_in_al(struct drbd_device *device, unsigned int enr)
 {
 	int rv;
 
-	spin_lock_irq(&mdev->al_lock);
-	rv = lc_is_used(mdev->act_log, enr);
-	spin_unlock_irq(&mdev->al_lock);
+	spin_lock_irq(&device->al_lock);
+	rv = lc_is_used(device->act_log, enr);
+	spin_unlock_irq(&device->al_lock);
 
 	return rv;
 }
 
 /**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
- * @mdev:	DRBD device.
+ * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
-int drbd_rs_begin_io(struct drbd_device *mdev, sector_t sector)
+int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
 {
 	unsigned int enr = BM_SECT_TO_EXT(sector);
 	struct bm_extent *bm_ext;
@@ -1023,8 +1023,8 @@ int drbd_rs_begin_io(struct drbd_device *mdev, sector_t sector)
 	   200 times -> 20 seconds. */
 
 retry:
-	sig = wait_event_interruptible(mdev->al_wait,
-			(bm_ext = _bme_get(mdev, enr)));
+	sig = wait_event_interruptible(device->al_wait,
+			(bm_ext = _bme_get(device, enr)));
 	if (sig)
 		return -EINTR;
 
@@ -1032,18 +1032,18 @@ retry:
 		return 0;
 
 	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
-		sig = wait_event_interruptible(mdev->al_wait,
-				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+		sig = wait_event_interruptible(device->al_wait,
+				!_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
 				test_bit(BME_PRIORITY, &bm_ext->flags));
 
 		if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
-			spin_lock_irq(&mdev->al_lock);
-			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+			spin_lock_irq(&device->al_lock);
+			if (lc_put(device->resync, &bm_ext->lce) == 0) {
 				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
-				mdev->resync_locked--;
-				wake_up(&mdev->al_wait);
+				device->resync_locked--;
+				wake_up(&device->al_wait);
 			}
-			spin_unlock_irq(&mdev->al_lock);
+			spin_unlock_irq(&device->al_lock);
 			if (sig)
 				return -EINTR;
 			if (schedule_timeout_interruptible(HZ/10))
@@ -1060,14 +1060,14 @@ retry:
 
 /**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
- * @mdev:	DRBD device.
+ * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
-int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector)
+int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
 {
 	unsigned int enr = BM_SECT_TO_EXT(sector);
 	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
@@ -1075,8 +1075,8 @@ int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector)
 	struct bm_extent *bm_ext;
 	int i;
 
-	spin_lock_irq(&mdev->al_lock);
-	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
+	spin_lock_irq(&device->al_lock);
+	if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
 		/* in case you have very heavy scattered io, it may
 		 * stall the syncer undefined if we give up the ref count
 		 * when we try again and requeue.
@@ -1090,28 +1090,28 @@ int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector)
 		 * the lc_put here...
 		 * we also have to wake_up
 		 */
-		e = lc_find(mdev->resync, mdev->resync_wenr);
+		e = lc_find(device->resync, device->resync_wenr);
 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 		if (bm_ext) {
 			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
 			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
 			clear_bit(BME_NO_WRITES, &bm_ext->flags);
-			mdev->resync_wenr = LC_FREE;
-			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
-				mdev->resync_locked--;
-			wake_up(&mdev->al_wait);
+			device->resync_wenr = LC_FREE;
+			if (lc_put(device->resync, &bm_ext->lce) == 0)
+				device->resync_locked--;
+			wake_up(&device->al_wait);
 		} else {
 			dev_alert(DEV, "LOGIC BUG\n");
 		}
 	}
 	/* TRY. */
-	e = lc_try_get(mdev->resync, enr);
+	e = lc_try_get(device->resync, enr);
 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 	if (bm_ext) {
 		if (test_bit(BME_LOCKED, &bm_ext->flags))
 			goto proceed;
 		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
-			mdev->resync_locked++;
+			device->resync_locked++;
 		} else {
 			/* we did set the BME_NO_WRITES,
 			 * but then could not set BME_LOCKED,
@@ -1123,13 +1123,13 @@ int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector)
 		goto check_al;
 	} else {
 		/* do we rather want to try later? */
-		if (mdev->resync_locked > mdev->resync->nr_elements-3)
+		if (device->resync_locked > device->resync->nr_elements-3)
 			goto try_again;
 		/* Do or do not. There is no try. -- Yoda */
-		e = lc_get(mdev->resync, enr);
+		e = lc_get(device->resync, enr);
 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 		if (!bm_ext) {
-			const unsigned long rs_flags = mdev->resync->flags;
+			const unsigned long rs_flags = device->resync->flags;
 			if (rs_flags & LC_STARVING)
 				dev_warn(DEV, "Have to wait for element"
 				     " (resync LRU too small?)\n");
@@ -1137,146 +1137,146 @@ int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector)
 			goto try_again;
 		}
 		if (bm_ext->lce.lc_number != enr) {
-			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
 			bm_ext->rs_failed = 0;
-			lc_committed(mdev->resync);
-			wake_up(&mdev->al_wait);
+			lc_committed(device->resync);
+			wake_up(&device->al_wait);
 			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
 		}
 		set_bit(BME_NO_WRITES, &bm_ext->flags);
 		D_ASSERT(bm_ext->lce.refcnt == 1);
-		mdev->resync_locked++;
+		device->resync_locked++;
 		goto check_al;
 	}
 check_al:
 	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
-		if (lc_is_used(mdev->act_log, al_enr+i))
+		if (lc_is_used(device->act_log, al_enr+i))
 			goto try_again;
 	}
 	set_bit(BME_LOCKED, &bm_ext->flags);
 proceed:
-	mdev->resync_wenr = LC_FREE;
-	spin_unlock_irq(&mdev->al_lock);
+	device->resync_wenr = LC_FREE;
+	spin_unlock_irq(&device->al_lock);
 	return 0;
 
 try_again:
 	if (bm_ext)
-		mdev->resync_wenr = enr;
-	spin_unlock_irq(&mdev->al_lock);
+		device->resync_wenr = enr;
+	spin_unlock_irq(&device->al_lock);
 	return -EAGAIN;
 }
 
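The retry protocol in drbd_try_rs_begin_io() is easier to see outside the kernel: the function never sleeps, every failure path remembers the wanted extent number in resync_wenr before dropping al_lock, and the caller gets -EAGAIN so it can retry once application I/O drains. The standalone userspace program below is a loose sketch of that shape under those assumptions, not DRBD code; struct extent, try_reserve(), and the field names are invented for illustration (compile with -pthread).

/* Hypothetical model of the try-then-back-off pattern above: reserve an
 * extent without ever sleeping, and report -EAGAIN so the caller retries. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct extent {
	pthread_mutex_t lock;
	int refcnt;        /* plays the role of lce.refcnt        */
	int no_writes;     /* plays the role of BME_NO_WRITES     */
	int locked;        /* plays the role of BME_LOCKED        */
	int in_use_by_app; /* stand-in for the check_al loop      */
};

static int try_reserve(struct extent *e)
{
	int ret = -EAGAIN;

	pthread_mutex_lock(&e->lock);
	if (e->locked) {            /* fast path: already reserved for us */
		ret = 0;
	} else if (!e->in_use_by_app) {
		e->no_writes = 1;   /* block new writers first ...        */
		e->refcnt++;
		e->locked = 1;      /* ... then mark it resync-owned      */
		ret = 0;
	}                           /* else: leave state, caller retries  */
	pthread_mutex_unlock(&e->lock);
	return ret;
}

int main(void)
{
	struct extent e = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0, 1 };

	if (try_reserve(&e) == -EAGAIN)
		puts("extent busy, try again later");
	e.in_use_by_app = 0;        /* application I/O on the extent done */
	if (try_reserve(&e) == 0)
		puts("extent reserved for resync");
	return 0;
}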
-void drbd_rs_complete_io(struct drbd_device *mdev, sector_t sector)
+void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
 {
 	unsigned int enr = BM_SECT_TO_EXT(sector);
 	struct lc_element *e;
 	struct bm_extent *bm_ext;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mdev->al_lock, flags);
-	e = lc_find(mdev->resync, enr);
+	spin_lock_irqsave(&device->al_lock, flags);
+	e = lc_find(device->resync, enr);
 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 	if (!bm_ext) {
-		spin_unlock_irqrestore(&mdev->al_lock, flags);
+		spin_unlock_irqrestore(&device->al_lock, flags);
 		if (__ratelimit(&drbd_ratelimit_state))
 			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
 		return;
 	}
 
 	if (bm_ext->lce.refcnt == 0) {
-		spin_unlock_irqrestore(&mdev->al_lock, flags);
+		spin_unlock_irqrestore(&device->al_lock, flags);
 		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
 			"but refcnt is 0!?\n",
 			(unsigned long long)sector, enr);
 		return;
 	}
 
-	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+	if (lc_put(device->resync, &bm_ext->lce) == 0) {
 		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
-		mdev->resync_locked--;
-		wake_up(&mdev->al_wait);
+		device->resync_locked--;
+		wake_up(&device->al_wait);
 	}
 
-	spin_unlock_irqrestore(&mdev->al_lock, flags);
+	spin_unlock_irqrestore(&device->al_lock, flags);
 }
 
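The put side above follows the usual refcount convention: lc_put() returns the new reference count, and only the final put clears the BME_* flags and wakes anyone sleeping on al_wait. A minimal standalone sketch of that convention, with invented names (struct rs_extent, release_extent) standing in for the real lc_* machinery, compiled with -pthread:

/* Hypothetical sketch: drop one reference; only the last put clears the
 * flags and wakes waiters, mirroring the lc_put() == 0 branch above. */
#include <pthread.h>
#include <stdio.h>

struct rs_extent {
	pthread_mutex_t lock;
	pthread_cond_t al_wait;   /* models device->al_wait */
	int refcnt;
	unsigned flags;           /* models the BME_* bits  */
};

static void release_extent(struct rs_extent *e)
{
	pthread_mutex_lock(&e->lock);
	if (e->refcnt == 0) {
		/* mirrors the "refcnt is 0!?" error branch above */
		fprintf(stderr, "release without matching reserve\n");
	} else if (--e->refcnt == 0) {
		e->flags = 0;                        /* clear all BME_* bits */
		pthread_cond_broadcast(&e->al_wait); /* wake all waiters     */
	}
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	struct rs_extent e = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 2, 0x3
	};

	release_extent(&e);  /* refcnt 2 -> 1, flags untouched            */
	release_extent(&e);  /* refcnt 1 -> 0, flags cleared, waiters woken */
	printf("flags=%#x refcnt=%d\n", e.flags, e.refcnt);
	return 0;
}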
 /**
  * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
- * @mdev:	DRBD device.
+ * @device:	DRBD device.
  */
-void drbd_rs_cancel_all(struct drbd_device *mdev)
+void drbd_rs_cancel_all(struct drbd_device *device)
 {
-	spin_lock_irq(&mdev->al_lock);
+	spin_lock_irq(&device->al_lock);
 
-	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
-		lc_reset(mdev->resync);
-		put_ldev(mdev);
+	if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
+		lc_reset(device->resync);
+		put_ldev(device);
 	}
-	mdev->resync_locked = 0;
-	mdev->resync_wenr = LC_FREE;
-	spin_unlock_irq(&mdev->al_lock);
-	wake_up(&mdev->al_wait);
+	device->resync_locked = 0;
+	device->resync_wenr = LC_FREE;
+	spin_unlock_irq(&device->al_lock);
+	wake_up(&device->al_wait);
 }
 
 /**
  * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
- * @mdev:	DRBD device.
+ * @device:	DRBD device.
  *
  * Returns 0 upon success, -EAGAIN if at least one reference count was
  * not zero.
  */
-int drbd_rs_del_all(struct drbd_device *mdev)
+int drbd_rs_del_all(struct drbd_device *device)
 {
 	struct lc_element *e;
 	struct bm_extent *bm_ext;
 	int i;
 
-	spin_lock_irq(&mdev->al_lock);
+	spin_lock_irq(&device->al_lock);
 
-	if (get_ldev_if_state(mdev, D_FAILED)) {
+	if (get_ldev_if_state(device, D_FAILED)) {
 		/* ok, ->resync is there. */
-		for (i = 0; i < mdev->resync->nr_elements; i++) {
-			e = lc_element_by_index(mdev->resync, i);
+		for (i = 0; i < device->resync->nr_elements; i++) {
+			e = lc_element_by_index(device->resync, i);
 			bm_ext = lc_entry(e, struct bm_extent, lce);
 			if (bm_ext->lce.lc_number == LC_FREE)
 				continue;
-			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
+			if (bm_ext->lce.lc_number == device->resync_wenr) {
 				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
 					 " got 'synced' by application io\n",
-					 mdev->resync_wenr);
+					 device->resync_wenr);
 				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
 				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
 				clear_bit(BME_NO_WRITES, &bm_ext->flags);
-				mdev->resync_wenr = LC_FREE;
-				lc_put(mdev->resync, &bm_ext->lce);
+				device->resync_wenr = LC_FREE;
+				lc_put(device->resync, &bm_ext->lce);
 			}
 			if (bm_ext->lce.refcnt != 0) {
 				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
 					 "refcnt=%d\n", bm_ext->lce.refcnt);
-				put_ldev(mdev);
-				spin_unlock_irq(&mdev->al_lock);
+				put_ldev(device);
+				spin_unlock_irq(&device->al_lock);
 				return -EAGAIN;
 			}
 			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
 			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
-			lc_del(mdev->resync, &bm_ext->lce);
+			lc_del(device->resync, &bm_ext->lce);
 		}
-		D_ASSERT(mdev->resync->used == 0);
-		put_ldev(mdev);
+		D_ASSERT(device->resync->used == 0);
+		put_ldev(device);
 	}
-	spin_unlock_irq(&mdev->al_lock);
-	wake_up(&mdev->al_wait);
+	spin_unlock_irq(&device->al_lock);
+	wake_up(&device->al_wait);
 
 	return 0;
 }
 
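drbd_rs_cancel_all() and drbd_rs_del_all() are two teardown strategies for the same LRU: the forced variant resets everything regardless of reference counts, while the graceful variant refuses with -EAGAIN as long as any element is still in flight. The standalone sketch below contrasts the two shapes under that reading; the array-of-ints "LRU" and the names cancel_all/del_all/NR_ELEMENTS are illustrative stand-ins, not the real lc_* API:

/* Hypothetical contrast of forced vs. graceful LRU teardown. */
#include <errno.h>
#include <stdio.h>

#define NR_ELEMENTS 4

static void cancel_all(int refcnt[])
{
	/* like lc_reset(): throw everything away, even busy entries */
	for (int i = 0; i < NR_ELEMENTS; i++)
		refcnt[i] = 0;
}

static int del_all(const int refcnt[])
{
	/* like drbd_rs_del_all(): refuse while anything is in flight */
	for (int i = 0; i < NR_ELEMENTS; i++)
		if (refcnt[i] != 0)
			return -EAGAIN;
	return 0;
}

int main(void)
{
	int refcnt[NR_ELEMENTS] = { 0, 1, 0, 0 };

	if (del_all(refcnt) == -EAGAIN)
		puts("del_all: busy, caller must retry later");
	cancel_all(refcnt);               /* the forced variant always wins */
	printf("del_all after cancel: %d\n", del_all(refcnt));
	return 0;
}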
 /**
  * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
- * @mdev:	DRBD device.
+ * @device:	DRBD device.
  * @sector:	The sector number.
  * @size:	Size of the failed IO operation, in bytes.
  */
-void drbd_rs_failed_io(struct drbd_device *mdev, sector_t sector, int size)
+void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size)
 {
 	/* Is called from worker and receiver context _only_ */
 	unsigned long sbnr, ebnr, lbnr;
@@ -1289,7 +1289,7 @@ void drbd_rs_failed_io(struct drbd_device *mdev, sector_t sector, int size)
 		     (unsigned long long)sector, size);
 		return;
 	}
-	nr_sectors = drbd_get_capacity(mdev->this_bdev);
+	nr_sectors = drbd_get_capacity(device->this_bdev);
 	esector = sector + (size >> 9) - 1;
 
 	if (!expect(sector < nr_sectors))
@@ -1317,21 +1317,21 @@ void drbd_rs_failed_io(struct drbd_device *mdev, sector_t sector, int size)
 	 * ok, (capacity & 7) != 0 sometimes, but who cares...
 	 * we count rs_{total,left} in bits, not sectors.
 	 */
-	spin_lock_irq(&mdev->al_lock);
-	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
+	spin_lock_irq(&device->al_lock);
+	count = drbd_bm_count_bits(device, sbnr, ebnr);
 	if (count) {
-		mdev->rs_failed += count;
+		device->rs_failed += count;
 
-		if (get_ldev(mdev)) {
-			drbd_try_clear_on_disk_bm(mdev, sector, count, false);
-			put_ldev(mdev);
+		if (get_ldev(device)) {
+			drbd_try_clear_on_disk_bm(device, sector, count, false);
+			put_ldev(device);
 		}
 
 		/* just wake_up unconditionally now, various lc_changed(),
 		 * lc_put() in drbd_try_clear_on_disk_bm(). */
 		wake_up = 1;
 	}
-	spin_unlock_irq(&mdev->al_lock);
+	spin_unlock_irq(&device->al_lock);
 	if (wake_up)
-		wake_up(&mdev->al_wait);
+		wake_up(&device->al_wait);
 }
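The sector arithmetic in drbd_rs_failed_io() converts a byte count to 512-byte sectors with size >> 9, so the last sector covered by the failed I/O is sector + (size >> 9) - 1. A tiny standalone check of that arithmetic; the values are made up, and the real sector-to-bitmap-bit mapping (BM_SECT_TO_BIT and friends) lives in DRBD's bitmap headers, not here:

/* Hypothetical arithmetic check: a 4 KiB failed I/O at sector S spans
 * sectors S .. S + 7, since 4096 >> 9 == 8 sectors of 512 bytes each. */
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 2048;
	int size = 4096;                       /* bytes */

	unsigned long long esector = sector + (size >> 9) - 1;
	printf("failed I/O covers sectors %llu..%llu (%d sectors)\n",
	       sector, esector, size >> 9);
	return 0;
}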