Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_trans.c	224
1 file changed, 95 insertions(+), 129 deletions(-)
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 2bff22995127..084bd3a13184 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -49,7 +49,6 @@
 STATIC void	xfs_trans_apply_sb_deltas(xfs_trans_t *);
 STATIC void	xfs_trans_uncommit(xfs_trans_t *, uint);
 STATIC void	xfs_trans_committed(xfs_trans_t *, int);
-STATIC void	xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
 STATIC void	xfs_trans_free(xfs_trans_t *);
 
 kmem_zone_t	*xfs_trans_zone;
@@ -1301,60 +1300,86 @@ xfs_trans_roll(
 }
 
 /*
- * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
+ * The committed item processing consists of calling the committed routine of
+ * each logged item, updating the item's position in the AIL if necessary, and
+ * unpinning each item.  If the committed routine returns -1, then do nothing
+ * further with the item because it may have been freed.
  *
- * This is typically called by the LM when a transaction has been fully
- * committed to disk.  It needs to unpin the items which have
- * been logged by the transaction and update their positions
- * in the AIL if necessary.
- * This also gets called when the transactions didn't get written out
- * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
+ * Since items are unlocked when they are copied to the incore log, it is
+ * possible for two transactions to be completing and manipulating the same
+ * item simultaneously.  The AIL lock will protect the lsn field of each item.
+ * The value of this field can never go backwards.
  *
- * Call xfs_trans_chunk_committed() to process the items in
- * each chunk.
+ * We unpin the items after repositioning them in the AIL, because otherwise
+ * they could be immediately flushed and we'd have to race with the flusher
+ * trying to pull the item from the AIL as we add it.
  */
-STATIC void
-xfs_trans_committed(
-	xfs_trans_t	*tp,
-	int		abortflag)
+static void
+xfs_trans_item_committed(
+	xfs_log_item_t	*lip,
+	xfs_lsn_t	commit_lsn,
+	int		aborted)
 {
-	xfs_log_item_chunk_t	*licp;
-	xfs_log_item_chunk_t	*next_licp;
-	xfs_log_busy_chunk_t	*lbcp;
-	xfs_log_busy_slot_t	*lbsp;
-	int			i;
+	xfs_lsn_t		item_lsn;
+	struct xfs_ail		*ailp;
+
+	if (aborted)
+		lip->li_flags |= XFS_LI_ABORTED;
 
 	/*
-	 * Call the transaction's completion callback if there
-	 * is one.
+	 * Send in the ABORTED flag to the COMMITTED routine so that it knows
+	 * whether the transaction was aborted or not.
 	 */
-	if (tp->t_callback != NULL) {
-		tp->t_callback(tp, tp->t_callarg);
-	}
+	item_lsn = IOP_COMMITTED(lip, commit_lsn);
 
 	/*
-	 * Special case the chunk embedded in the transaction.
+	 * If the committed routine returns -1, item has been freed.
 	 */
-	licp = &(tp->t_items);
-	if (!(xfs_lic_are_all_free(licp))) {
-		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
-	}
+	if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+		return;
 
 	/*
-	 * Process the items in each chunk in turn.
+	 * If the returned lsn is greater than what it contained before, update
+	 * the location of the item in the AIL.  If it is not, then do nothing.
+	 * Items can never move backwards in the AIL.
+	 *
+	 * While the new lsn should usually be greater, it is possible that a
+	 * later transaction completing simultaneously with an earlier one
+	 * using the same item could complete first with a higher lsn.  This
+	 * would cause the earlier transaction to fail the test below.
 	 */
-	licp = licp->lic_next;
-	while (licp != NULL) {
-		ASSERT(!xfs_lic_are_all_free(licp));
-		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
-		next_licp = licp->lic_next;
-		kmem_free(licp);
-		licp = next_licp;
+	ailp = lip->li_ailp;
+	spin_lock(&ailp->xa_lock);
+	if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
+		/*
+		 * This will set the item's lsn to item_lsn and update the
+		 * position of the item in the AIL.
+		 *
+		 * xfs_trans_ail_update() drops the AIL lock.
+		 */
+		xfs_trans_ail_update(ailp, lip, item_lsn);
+	} else {
+		spin_unlock(&ailp->xa_lock);
 	}
 
 	/*
-	 * Clear all the per-AG busy list items listed in this transaction
+	 * Now that we've repositioned the item in the AIL, unpin it so it can
+	 * be flushed. Pass information about buffer stale state down from the
+	 * log item flags, if anyone else stales the buffer we do not want to
+	 * pay any attention to it.
 	 */
+	IOP_UNPIN(lip);
+}
+
+/* Clear all the per-AG busy list items listed in this transaction */
+static void
+xfs_trans_clear_busy_extents(
+	struct xfs_trans	*tp)
+{
+	xfs_log_busy_chunk_t	*lbcp;
+	xfs_log_busy_slot_t	*lbsp;
+	int			i;
+
 	lbcp = &tp->t_busy;
 	while (lbcp != NULL) {
 		for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
@@ -1366,107 +1391,48 @@ xfs_trans_committed(
 		lbcp = lbcp->lbc_next;
 	}
 	xfs_trans_free_busy(tp);
-
-	/*
-	 * That's it for the transaction structure.  Free it.
-	 */
-	xfs_trans_free(tp);
 }
 
 /*
- * This is called to perform the commit processing for each
- * item described by the given chunk.
- *
- * The commit processing consists of unlocking items which were
- * held locked with the SYNC_UNLOCK attribute, calling the committed
- * routine of each logged item, updating the item's position in the AIL
- * if necessary, and unpinning each item.  If the committed routine
- * returns -1, then do nothing further with the item because it
- * may have been freed.
- *
- * Since items are unlocked when they are copied to the incore
- * log, it is possible for two transactions to be completing
- * and manipulating the same item simultaneously.  The AIL lock
- * will protect the lsn field of each item.  The value of this
- * field can never go backwards.
+ * This is typically called by the LM when a transaction has been fully
+ * committed to disk.  It needs to unpin the items which have
+ * been logged by the transaction and update their positions
+ * in the AIL if necessary.
  *
- * We unpin the items after repositioning them in the AIL, because
- * otherwise they could be immediately flushed and we'd have to race
- * with the flusher trying to pull the item from the AIL as we add it.
+ * This also gets called when the transactions didn't get written out
+ * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
  */
 STATIC void
-xfs_trans_chunk_committed(
-	xfs_log_item_chunk_t	*licp,
-	xfs_lsn_t		lsn,
-	int			aborted)
+xfs_trans_committed(
+	xfs_trans_t	*tp,
+	int		abortflag)
 {
 	xfs_log_item_desc_t	*lidp;
-	xfs_log_item_t		*lip;
-	xfs_lsn_t		item_lsn;
-	int			i;
-
-	lidp = licp->lic_descs;
-	for (i = 0; i < licp->lic_unused; i++, lidp++) {
-		struct xfs_ail		*ailp;
-
-		if (xfs_lic_isfree(licp, i)) {
-			continue;
-		}
-
-		lip = lidp->lid_item;
-		if (aborted)
-			lip->li_flags |= XFS_LI_ABORTED;
-
-		/*
-		 * Send in the ABORTED flag to the COMMITTED routine
-		 * so that it knows whether the transaction was aborted
-		 * or not.
-		 */
-		item_lsn = IOP_COMMITTED(lip, lsn);
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	*next_licp;
 
-		/*
-		 * If the committed routine returns -1, make
-		 * no more references to the item.
-		 */
-		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
-			continue;
-		}
+	/*
+	 * Call the transaction's completion callback if there
+	 * is one.
+	 */
+	if (tp->t_callback != NULL) {
+		tp->t_callback(tp, tp->t_callarg);
+	}
 
-		/*
-		 * If the returned lsn is greater than what it
-		 * contained before, update the location of the
-		 * item in the AIL.  If it is not, then do nothing.
-		 * Items can never move backwards in the AIL.
-		 *
-		 * While the new lsn should usually be greater, it
-		 * is possible that a later transaction completing
-		 * simultaneously with an earlier one using the
-		 * same item could complete first with a higher lsn.
-		 * This would cause the earlier transaction to fail
-		 * the test below.
-		 */
-		ailp = lip->li_ailp;
-		spin_lock(&ailp->xa_lock);
-		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
-			/*
-			 * This will set the item's lsn to item_lsn
-			 * and update the position of the item in
-			 * the AIL.
-			 *
-			 * xfs_trans_ail_update() drops the AIL lock.
-			 */
-			xfs_trans_ail_update(ailp, lip, item_lsn);
-		} else {
-			spin_unlock(&ailp->xa_lock);
-		}
+	for (lidp = xfs_trans_first_item(tp);
+	     lidp != NULL;
+	     lidp = xfs_trans_next_item(tp, lidp)) {
+		xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
+	}
 
-		/*
-		 * Now that we've repositioned the item in the AIL,
-		 * unpin it so it can be flushed. Pass information
-		 * about buffer stale state down from the log item
-		 * flags, if anyone else stales the buffer we do not
-		 * want to pay any attention to it.
-		 */
-		IOP_UNPIN(lip);
+	/* free the item chunks, ignoring the embedded chunk */
+	licp = tp->t_items.lic_next;
+	while (licp != NULL) {
+		next_licp = licp->lic_next;
+		kmem_free(licp);
+		licp = next_licp;
 	}
+
+	xfs_trans_clear_busy_extents(tp);
+	xfs_trans_free(tp);
 }
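
For readers following the refactor above, here is a minimal, self-contained user-space sketch of the rule the new xfs_trans_item_committed() enforces: an item's LSN may only move forward, and only while the AIL lock is held, so two transactions completing against the same item can never push it backwards. This is not XFS code; demo_ail, demo_log_item and the pthread mutex are hypothetical stand-ins for the real AIL, log item and xa_lock spinlock.

/*
 * Simplified analogue of the "LSN never moves backwards" check done by
 * xfs_trans_item_committed() under the AIL lock.  All names are invented
 * for illustration; none of them exist in the kernel.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t demo_lsn_t;		/* stand-in for xfs_lsn_t */

struct demo_ail {
	pthread_mutex_t	lock;		/* stand-in for ailp->xa_lock */
};

struct demo_log_item {
	struct demo_ail	*ailp;
	demo_lsn_t	lsn;		/* current position in the "AIL" */
};

/*
 * Reposition the item only if the LSN returned by commit processing is
 * strictly greater than the one already stored; otherwise leave it alone,
 * mirroring the XFS_LSN_CMP() test in the hunk above.
 */
static void
demo_item_committed(struct demo_log_item *lip, demo_lsn_t item_lsn)
{
	pthread_mutex_lock(&lip->ailp->lock);
	if (item_lsn > lip->lsn)
		lip->lsn = item_lsn;
	pthread_mutex_unlock(&lip->ailp->lock);
}

int main(void)
{
	struct demo_ail ail = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_log_item item = { .ailp = &ail, .lsn = 100 };

	/* A later transaction completes first with a higher LSN ... */
	demo_item_committed(&item, 200);
	/* ... so the earlier transaction's lower LSN must be ignored. */
	demo_item_committed(&item, 150);

	printf("item lsn = %lld\n", (long long)item.lsn);	/* prints 200 */
	return 0;
}

Build with "cc demo.c -lpthread"; the second, lower LSN is discarded, just as the earlier transaction in the comment above fails the compare-under-lock test and leaves the item where the later transaction put it.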