Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
"These are scattered fixes and one performance improvement. The
biggest functional change is in how we throttle metadata changes. The
new code bumps our average file creation rate up by ~13% in fs_mark,
and lowers CPU usage.

Stefan bisected out a regression in our allocation code that made
balance loop on extents larger than 256MB."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
Btrfs: improve the delayed inode throttling
Btrfs: fix a mismerge in btrfs_balance()
Btrfs: enforce min_bytes parameter during extent allocation
Btrfs: allow running defrag in parallel to administrative tasks
Btrfs: avoid deadlock on transaction waiting list
Btrfs: do not BUG_ON on aborted situation
Btrfs: do not BUG_ON in prepare_to_reloc
Btrfs: free all recorded tree blocks on error
Btrfs: build up error handling for merge_reloc_roots
Btrfs: check for NULL pointer in updating reloc roots
Btrfs: fix unclosed transaction handler when the async transaction commitment fails
Btrfs: fix wrong handle at error path of create_snapshot() when the commit fails
Btrfs: use set_nlink if our i_nlink is 0
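
The throttling rework described above replaces per-item wake-ups (plus a coarse one-second poll) with batched ones. Below is a minimal userspace model of the new wake-up predicate; the two thresholds and the items/items_seq counters come from the patch, everything else (the helper shape, the main() driver) is invented for illustration.

/*
 * Userspace model, not kernel code: mirrors the finish_one_item()
 * test added in fs/btrfs/delayed-inode.c.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DELAYED_BACKGROUND	128	/* background flushing threshold */
#define DELAYED_BATCH		16	/* wake throttled writers every N */

static atomic_int items;	/* outstanding delayed items */
static atomic_int items_seq;	/* completions, ever increasing */

/* One delayed item finished: report a wake-up if the backlog fell
 * below the background threshold, or on every DELAYED_BATCH-th
 * completion, so sleeping writers are released in batches instead of
 * once per item. */
static bool finish_one_item(void)
{
	int seq = atomic_fetch_add(&items_seq, 1) + 1;
	int left = atomic_fetch_sub(&items, 1) - 1;

	return left < DELAYED_BACKGROUND || seq % DELAYED_BATCH == 0;
}

int main(void)
{
	atomic_store(&items, 200);
	for (int i = 0; i < 40; i++)
		if (finish_one_item())
			printf("wake after completion %d\n",
			       atomic_load(&items_seq));
	return 0;	/* prints: wake after completion 16, then 32 */
}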

Diffstat: 9 files changed, +223 -132

fs/btrfs/delayed-inode.c | +92 -63

···
 #include "disk-io.h"
 #include "transaction.h"

-#define BTRFS_DELAYED_WRITEBACK		400
-#define BTRFS_DELAYED_BACKGROUND	100
+#define BTRFS_DELAYED_WRITEBACK		512
+#define BTRFS_DELAYED_BACKGROUND	128
+#define BTRFS_DELAYED_BATCH		16

 static struct kmem_cache *delayed_node_cache;
···
 					  BTRFS_DELAYED_DELETION_ITEM);
 }

+static void finish_one_item(struct btrfs_delayed_root *delayed_root)
+{
+	int seq = atomic_inc_return(&delayed_root->items_seq);
+	if ((atomic_dec_return(&delayed_root->items) <
+	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
+	    waitqueue_active(&delayed_root->wait))
+		wake_up(&delayed_root->wait);
+}
+
 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 {
 	struct rb_root *root;
···

 	rb_erase(&delayed_item->rb_node, root);
 	delayed_item->delayed_node->count--;
-	if (atomic_dec_return(&delayed_root->items) <
-	    BTRFS_DELAYED_BACKGROUND &&
-	    waitqueue_active(&delayed_root->wait))
-		wake_up(&delayed_root->wait);
+
+	finish_one_item(delayed_root);
 }

 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
···
 		delayed_node->count--;

 		delayed_root = delayed_node->root->fs_info->delayed_root;
-		if (atomic_dec_return(&delayed_root->items) <
-		    BTRFS_DELAYED_BACKGROUND &&
-		    waitqueue_active(&delayed_root->wait))
-			wake_up(&delayed_root->wait);
+		finish_one_item(delayed_root);
 	}
 }
···
 	btrfs_release_delayed_node(delayed_node);
 }

-struct btrfs_async_delayed_node {
-	struct btrfs_root *root;
-	struct btrfs_delayed_node *delayed_node;
+struct btrfs_async_delayed_work {
+	struct btrfs_delayed_root *delayed_root;
+	int nr;
 	struct btrfs_work work;
 };

-static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
+static void btrfs_async_run_delayed_root(struct btrfs_work *work)
 {
-	struct btrfs_async_delayed_node *async_node;
+	struct btrfs_async_delayed_work *async_work;
+	struct btrfs_delayed_root *delayed_root;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_path *path;
 	struct btrfs_delayed_node *delayed_node = NULL;
 	struct btrfs_root *root;
 	struct btrfs_block_rsv *block_rsv;
-	int need_requeue = 0;
+	int total_done = 0;

-	async_node = container_of(work, struct btrfs_async_delayed_node, work);
+	async_work = container_of(work, struct btrfs_async_delayed_work, work);
+	delayed_root = async_work->delayed_root;

 	path = btrfs_alloc_path();
 	if (!path)
 		goto out;
-	path->leave_spinning = 1;

-	delayed_node = async_node->delayed_node;
+again:
+	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
+		goto free_path;
+
+	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
+	if (!delayed_node)
+		goto free_path;
+
+	path->leave_spinning = 1;
 	root = delayed_node->root;

 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
-		goto free_path;
+		goto release_path;

 	block_rsv = trans->block_rsv;
 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
···
 	 * Task1 will sleep until the transaction is commited.
 	 */
 	mutex_lock(&delayed_node->mutex);
-	if (delayed_node->count)
-		need_requeue = 1;
-	else
-		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
-					   delayed_node);
+	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
 	mutex_unlock(&delayed_node->mutex);

 	trans->block_rsv = block_rsv;
 	btrfs_end_transaction_dmeta(trans, root);
 	btrfs_btree_balance_dirty_nodelay(root);
+
+release_path:
+	btrfs_release_path(path);
+	total_done++;
+
+	btrfs_release_prepared_delayed_node(delayed_node);
+	if (async_work->nr == 0 || total_done < async_work->nr)
+		goto again;
+
 free_path:
 	btrfs_free_path(path);
 out:
-	if (need_requeue)
-		btrfs_requeue_work(&async_node->work);
-	else {
-		btrfs_release_prepared_delayed_node(delayed_node);
-		kfree(async_node);
-	}
+	wake_up(&delayed_root->wait);
+	kfree(async_work);
 }

-static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
-				     struct btrfs_root *root, int all)
-{
-	struct btrfs_async_delayed_node *async_node;
-	struct btrfs_delayed_node *curr;
-	int count = 0;

-again:
-	curr = btrfs_first_prepared_delayed_node(delayed_root);
-	if (!curr)
+static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+				     struct btrfs_root *root, int nr)
+{
+	struct btrfs_async_delayed_work *async_work;
+
+	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return 0;

-	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
-	if (!async_node) {
-		btrfs_release_prepared_delayed_node(curr);
+	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
+	if (!async_work)
 		return -ENOMEM;
-	}

-	async_node->root = root;
-	async_node->delayed_node = curr;
+	async_work->delayed_root = delayed_root;
+	async_work->work.func = btrfs_async_run_delayed_root;
+	async_work->work.flags = 0;
+	async_work->nr = nr;

-	async_node->work.func = btrfs_async_run_delayed_node_done;
-	async_node->work.flags = 0;
-
-	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
-	count++;
-
-	if (all || count < 4)
-		goto again;
-
+	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
 	return 0;
 }
···
 	WARN_ON(btrfs_first_delayed_node(delayed_root));
 }

+static int refs_newer(struct btrfs_delayed_root *delayed_root,
+		      int seq, int count)
+{
+	int val = atomic_read(&delayed_root->items_seq);
+
+	if (val < seq || val >= seq + count)
+		return 1;
+	return 0;
+}
+
 void btrfs_balance_delayed_items(struct btrfs_root *root)
 {
 	struct btrfs_delayed_root *delayed_root;
+	int seq;

 	delayed_root = btrfs_get_delayed_root(root);

 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return;

+	seq = atomic_read(&delayed_root->items_seq);
+
 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
 		int ret;
-		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
+		DEFINE_WAIT(__wait);
+
+		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
 		if (ret)
 			return;

-		wait_event_interruptible_timeout(
-				delayed_root->wait,
-				(atomic_read(&delayed_root->items) <
-				 BTRFS_DELAYED_BACKGROUND),
-				HZ);
-		return;
+		while (1) {
+			prepare_to_wait(&delayed_root->wait, &__wait,
+					TASK_INTERRUPTIBLE);
+
+			if (refs_newer(delayed_root, seq,
+				       BTRFS_DELAYED_BATCH) ||
+			    atomic_read(&delayed_root->items) <
+			    BTRFS_DELAYED_BACKGROUND) {
+				break;
+			}
+			if (!signal_pending(current))
+				schedule();
+			else
+				break;
+		}
+		finish_wait(&delayed_root->wait, &__wait);
 	}

-	btrfs_wq_run_delayed_node(delayed_root, root, 0);
+	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
 }

 /* Will return 0 or -ENOMEM */
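
On the writer side, the patch open-codes the wait loop so a throttled task wakes as soon as one batch of completions has happened since it sampled items_seq, instead of polling with a one-second timeout. A rough pthread analogue of that loop follows; the locking and helpers are invented for userspace, while the kernel version uses prepare_to_wait()/schedule()/finish_wait() on delayed_root->wait.

#include <pthread.h>

#define DELAYED_BACKGROUND	128
#define DELAYED_BATCH		16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
static int items;	/* both counters protected by lock in this model */
static int items_seq;

/* Mirrors refs_newer(): true once `count` more items have finished
 * since `seq` was sampled; "val < seq" also releases the waiter if
 * the int sequence counter wrapped around. */
static int refs_newer(int val, int seq, int count)
{
	return val < seq || val >= seq + count;
}

/* The worker draining items would pthread_cond_broadcast(&wait_q)
 * from its finish_one_item() equivalent. */
static void balance_delayed_items(void)
{
	pthread_mutex_lock(&lock);
	int seq = items_seq;

	while (!refs_newer(items_seq, seq, DELAYED_BATCH) &&
	       items >= DELAYED_BACKGROUND)
		pthread_cond_wait(&wait_q, &lock);
	pthread_mutex_unlock(&lock);
}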
fs/btrfs/delayed-inode.h | +2 -0

···
 	 */
 	struct list_head prepare_list;
 	atomic_t items;		/* for delayed items */
+	atomic_t items_seq;	/* for delayed items */
 	int nodes;		/* for delayed nodes */
 	wait_queue_head_t wait;
 };
···
 				struct btrfs_delayed_root *delayed_root)
 {
 	atomic_set(&delayed_root->items, 0);
+	atomic_set(&delayed_root->items_seq, 0);
 	delayed_root->nodes = 0;
 	spin_lock_init(&delayed_root->lock);
 	init_waitqueue_head(&delayed_root->wait);
fs/btrfs/disk-io.c | +7 -9

···
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 				      struct btrfs_root *root);
-static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
+static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 					struct extent_io_tree *dirty_pages,
···
 	return ret;
 }

-static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
+static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
 {
 	struct btrfs_pending_snapshot *snapshot;
 	struct list_head splice;
···
 		snapshot = list_entry(splice.next,
 				      struct btrfs_pending_snapshot,
 				      list);
-
+		snapshot->error = -ECANCELED;
 		list_del_init(&snapshot->list);
-
-		kfree(snapshot);
 	}
 }
···
 	cur_trans->blocked = 1;
 	wake_up(&root->fs_info->transaction_blocked_wait);

+	btrfs_evict_pending_snapshots(cur_trans);
+
 	cur_trans->blocked = 0;
 	wake_up(&root->fs_info->transaction_wait);
···
 	btrfs_destroy_delayed_inodes(root);
 	btrfs_assert_delayed_root_empty(root);
-
-	btrfs_destroy_pending_snapshots(cur_trans);

 	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
 				     EXTENT_DIRTY);
···
 	if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
 		wake_up(&root->fs_info->transaction_blocked_wait);

+	btrfs_evict_pending_snapshots(t);
+
 	t->blocked = 0;
 	smp_mb();
 	if (waitqueue_active(&root->fs_info->transaction_wait))
···
 	btrfs_destroy_delayed_inodes(root);
 	btrfs_assert_delayed_root_empty(root);
-
-	btrfs_destroy_pending_snapshots(t);

 	btrfs_destroy_delalloc_inodes(root);
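
The rename from "destroy" to "evict" marks an ownership change: on a forced cleanup the transaction no longer frees pending snapshots it did not allocate, it only flags them with -ECANCELED and unlinks them, and the ioctl that created each one reads the flag and frees it. A simplified singly-linked model of the eviction side (the list type here is invented; the kernel uses list_head):

#include <errno.h>
#include <stddef.h>

struct pending_snapshot {
	struct pending_snapshot *next;
	int error;	/* read back by the creator (the ioctl path) */
};

/* Detach every entry and mark it cancelled; never free() here, the
 * creator still holds a pointer and will free it exactly once. */
static void evict_pending_snapshots(struct pending_snapshot **head)
{
	struct pending_snapshot *s;

	while ((s = *head) != NULL) {
		*head = s->next;
		s->next = NULL;
		s->error = -ECANCELED;
	}
}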
fs/btrfs/inode.c | +4 -2

···
 	struct btrfs_key ins;
 	u64 cur_offset = start;
 	u64 i_size;
+	u64 cur_bytes;
 	int ret = 0;
 	bool own_trans = true;
···
 		}
 	}

-		ret = btrfs_reserve_extent(trans, root,
-					   min(num_bytes, 256ULL * 1024 * 1024),
+		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
+		cur_bytes = max(cur_bytes, min_size);
+		ret = btrfs_reserve_extent(trans, root, cur_bytes,
 					   min_size, 0, *alloc_hint, &ins, 1);
 		if (ret) {
 			if (own_trans)
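
This hunk is the fix for the regression Stefan bisected: the prealloc loop caps each reservation at 256MB, and for extents larger than that the old code could hand btrfs_reserve_extent() a request smaller than its own min_size, which is what made balance loop. The clamp, restated as a standalone helper with an invented name:

#include <stdint.h>

/* cur_bytes = max(min(num_bytes, 256MB), min_size), so the request
 * never undercuts the min_bytes the allocator is told to enforce. */
static uint64_t cur_bytes_for_reserve(uint64_t num_bytes, uint64_t min_size)
{
	uint64_t cur = num_bytes < 256ULL * 1024 * 1024 ?
		       num_bytes : 256ULL * 1024 * 1024;
	return cur > min_size ? cur : min_size;
}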
fs/btrfs/ioctl.c | +5 -13

···
 	if (async_transid) {
 		*async_transid = trans->transid;
 		err = btrfs_commit_transaction_async(trans, root, 1);
+		if (err)
+			err = btrfs_commit_transaction(trans, root);
 	} else {
 		err = btrfs_commit_transaction(trans, root);
 	}
···
 		*async_transid = trans->transid;
 		ret = btrfs_commit_transaction_async(trans,
 				root->fs_info->extent_root, 1);
+		if (ret)
+			ret = btrfs_commit_transaction(trans, root);
 	} else {
 		ret = btrfs_commit_transaction(trans,
 				root->fs_info->extent_root);
 	}
-	if (ret) {
-		/* cleanup_transaction has freed this for us */
-		if (trans->aborted)
-			pending_snapshot = NULL;
+	if (ret)
 		goto fail;
-	}

 	ret = pending_snapshot->error;
 	if (ret)
···
 	if (ret)
 		return ret;

-	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
-		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-		mnt_drop_write_file(file);
-		return -EINVAL;
-	}
-
 	if (btrfs_root_readonly(root)) {
 		ret = -EROFS;
 		goto out;
···
 		ret = -EINVAL;
 	}
 out:
-	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
 	mnt_drop_write_file(file);
 	return ret;
 }
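
Both ioctl hunks apply the same fallback: if the asynchronous commit cannot be started, commit synchronously instead of returning with the transaction handle still open (the "fix unclosed transaction handler" patch in the shortlog). The shape of the fix, with stubs standing in for the two btrfs commit calls:

#include <errno.h>

static int commit_async(void) { return -ENOMEM; }	/* stub */
static int commit_sync(void)  { return 0; }		/* stub */

static int commit_or_fallback(void)
{
	int err = commit_async();
	if (err)			/* async commit never started */
		err = commit_sync();	/* close the handle synchronously */
	return err;
}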
fs/btrfs/relocation.c | +57 -17

···
 	}
 	spin_unlock(&rc->reloc_root_tree.lock);

+	if (!node)
+		return 0;
 	BUG_ON((struct btrfs_root *)node->data != root);

 	if (!del) {
···
 }

 static noinline_for_stack
+void free_reloc_roots(struct list_head *list)
+{
+	struct btrfs_root *reloc_root;
+
+	while (!list_empty(list)) {
+		reloc_root = list_entry(list->next, struct btrfs_root,
+					root_list);
+		__update_reloc_root(reloc_root, 1);
+		free_extent_buffer(reloc_root->node);
+		free_extent_buffer(reloc_root->commit_root);
+		kfree(reloc_root);
+	}
+}
+
+static noinline_for_stack
 int merge_reloc_roots(struct reloc_control *rc)
 {
 	struct btrfs_root *root;
 	struct btrfs_root *reloc_root;
 	LIST_HEAD(reloc_roots);
 	int found = 0;
-	int ret;
+	int ret = 0;
 again:
 	root = rc->extent_root;
···
 			BUG_ON(root->reloc_root != reloc_root);

 			ret = merge_reloc_root(rc, root);
-			BUG_ON(ret);
+			if (ret)
+				goto out;
 		} else {
 			list_del_init(&reloc_root->root_list);
 		}
 		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
-		BUG_ON(ret < 0);
+		if (ret < 0) {
+			if (list_empty(&reloc_root->root_list))
+				list_add_tail(&reloc_root->root_list,
+					      &reloc_roots);
+			goto out;
+		}
 	}

 	if (found) {
 		found = 0;
 		goto again;
 	}
+out:
+	if (ret) {
+		btrfs_std_error(root->fs_info, ret);
+		if (!list_empty(&reloc_roots))
+			free_reloc_roots(&reloc_roots);
+	}
+
 	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
-	return 0;
+	return ret;
 }

 static void free_block_list(struct rb_root *blocks)
···
 	int err = 0;

 	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
+	if (!path) {
+		err = -ENOMEM;
+		goto out_path;
+	}

 	rb_node = rb_first(blocks);
 	while (rb_node) {
···
 		rb_node = rb_next(rb_node);
 	}
 out:
-	free_block_list(blocks);
 	err = finish_pending_nodes(trans, rc, path, err);

 	btrfs_free_path(path);
+out_path:
+	free_block_list(blocks);
 	return err;
 }
···
 	set_reloc_control(rc);

 	trans = btrfs_join_transaction(rc->extent_root);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		unset_reloc_control(rc);
+		/*
+		 * extent tree is not a ref_cow tree and has no reloc_root to
+		 * cleanup. And callers are responsible to free the above
+		 * block rsv.
+		 */
+		return PTR_ERR(trans);
+	}
 	btrfs_commit_transaction(trans, rc->extent_root);
 	return 0;
 }
···
 	while (1) {
 		progress++;
 		trans = btrfs_start_transaction(rc->extent_root, 0);
-		BUG_ON(IS_ERR(trans));
+		if (IS_ERR(trans)) {
+			err = PTR_ERR(trans);
+			trans = NULL;
+			break;
+		}
 restart:
 		if (update_backref_cache(trans, &rc->backref_cache)) {
 			btrfs_end_transaction(trans, rc->extent_root);
···
 out_free:
 	kfree(rc);
 out:
-	while (!list_empty(&reloc_roots)) {
-		reloc_root = list_entry(reloc_roots.next,
-					struct btrfs_root, root_list);
-		list_del(&reloc_root->root_list);
-		free_extent_buffer(reloc_root->node);
-		free_extent_buffer(reloc_root->commit_root);
-		kfree(reloc_root);
-	}
+	if (!list_empty(&reloc_roots))
+		free_reloc_roots(&reloc_roots);
+
 	btrfs_free_path(path);

 	if (err == 0) {
fs/btrfs/transaction.c | +40 -25

···

 /*
  * new snapshots need to be created at a very specific time in the
- * transaction commit. This does the actual creation
+ * transaction commit. This does the actual creation.
+ *
+ * Note:
+ * If the error which may affect the commitment of the current transaction
+ * happens, we should return the error number. If the error which just affect
+ * the creation of the pending snapshots, just return 0.
  */
 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 					    struct btrfs_fs_info *fs_info,
···
 	struct extent_buffer *tmp;
 	struct extent_buffer *old;
 	struct timespec cur_time = CURRENT_TIME;
-	int ret;
+	int ret = 0;
 	u64 to_reserve = 0;
 	u64 index = 0;
 	u64 objectid;
···
 	path = btrfs_alloc_path();
 	if (!path) {
-		ret = pending->error = -ENOMEM;
-		return ret;
+		pending->error = -ENOMEM;
+		return 0;
 	}

 	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
 	if (!new_root_item) {
-		ret = pending->error = -ENOMEM;
+		pending->error = -ENOMEM;
 		goto root_item_alloc_fail;
 	}

-	ret = btrfs_find_free_objectid(tree_root, &objectid);
-	if (ret) {
-		pending->error = ret;
+	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
+	if (pending->error)
 		goto no_free_objectid;
-	}

 	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

 	if (to_reserve > 0) {
-		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
-					  to_reserve,
-					  BTRFS_RESERVE_NO_FLUSH);
-		if (ret) {
-			pending->error = ret;
+		pending->error = btrfs_block_rsv_add(root,
+						     &pending->block_rsv,
+						     to_reserve,
+						     BTRFS_RESERVE_NO_FLUSH);
+		if (pending->error)
 			goto no_free_objectid;
-		}
 	}

-	ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
-				   objectid, pending->inherit);
-	if (ret) {
-		pending->error = ret;
+	pending->error = btrfs_qgroup_inherit(trans, fs_info,
+					      root->root_key.objectid,
+					      objectid, pending->inherit);
+	if (pending->error)
 		goto no_free_objectid;
-	}

 	key.objectid = objectid;
 	key.offset = (u64)-1;
···
 					 dentry->d_name.len, 0);
 	if (dir_item != NULL && !IS_ERR(dir_item)) {
 		pending->error = -EEXIST;
-		goto fail;
+		goto dir_item_existed;
 	} else if (IS_ERR(dir_item)) {
 		ret = PTR_ERR(dir_item);
 		btrfs_abort_transaction(trans, root, ret);
···
 	if (ret)
 		btrfs_abort_transaction(trans, root, ret);
 fail:
+	pending->error = ret;
+dir_item_existed:
 	trans->block_rsv = rsv;
 	trans->bytes_reserved = 0;
 no_free_objectid:
···
 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
 					     struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_pending_snapshot *pending;
+	struct btrfs_pending_snapshot *pending, *next;
 	struct list_head *head = &trans->transaction->pending_snapshots;
+	int ret = 0;

-	list_for_each_entry(pending, head, list)
-		create_pending_snapshot(trans, fs_info, pending);
-	return 0;
+	list_for_each_entry_safe(pending, next, head, list) {
+		list_del(&pending->list);
+		ret = create_pending_snapshot(trans, fs_info, pending);
+		if (ret)
+			break;
+	}
+	return ret;
 }

 static void update_super_roots(struct btrfs_root *root)
···
 		btrfs_abort_transaction(trans, root, err);

 	spin_lock(&root->fs_info->trans_lock);
+
+	if (list_empty(&cur_trans->list)) {
+		spin_unlock(&root->fs_info->trans_lock);
+		btrfs_end_transaction(trans, root);
+		return;
+	}
+
 	list_del_init(&cur_trans->list);
 	if (cur_trans == root->fs_info->running_transaction) {
 		root->fs_info->trans_no_join = 1;
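
The reworked snapshot creation separates two kinds of failure: an error that affects only one snapshot is parked in pending->error and 0 is returned so the commit continues, while a transaction-fatal error returns nonzero and stops the walk over the pending list. A compact model of that contract (the types and the dir_item_exists flag are invented here):

#include <errno.h>
#include <stddef.h>

struct pending_snapshot {
	struct pending_snapshot *next;
	int error;	/* per-snapshot result, read by the ioctl */
};

/* Nonzero return means the whole transaction is doomed; snapshot-
 * local failures only mark this entry and let the commit carry on. */
static int create_pending_snapshot(struct pending_snapshot *pending,
				   int dir_item_exists)
{
	if (dir_item_exists) {
		pending->error = -EEXIST;
		return 0;
	}
	return 0;
}

static int create_pending_snapshots(struct pending_snapshot *head)
{
	int ret = 0;

	for (struct pending_snapshot *p = head; p && !ret; p = p->next)
		ret = create_pending_snapshot(p, 0);
	return ret;	/* stops early only on transaction-fatal errors */
}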
fs/btrfs/tree-log.c | +4 -1

···

 	btrfs_release_path(path);
 	if (ret == 0) {
-		btrfs_inc_nlink(inode);
+		if (!inode->i_nlink)
+			set_nlink(inode, 1);
+		else
+			btrfs_inc_nlink(inode);
 		ret = btrfs_update_inode(trans, root, inode);
 	} else if (ret == -EEXIST) {
 		ret = 0;
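
The guard exists because inc_nlink() is not meant to lift a link count from zero (the VFS warns when asked to increment a zero link count), so log replay resurrects an unlinked inode with an explicit set_nlink(inode, 1). Restated over a bare counter:

/* Hypothetical userspace restatement of the tree-log hunk above. */
static void fixup_inode_nlink(unsigned int *i_nlink)
{
	if (*i_nlink == 0)
		*i_nlink = 1;		/* set_nlink(inode, 1) */
	else
		(*i_nlink)++;		/* btrfs_inc_nlink(inode) */
}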
fs/btrfs/volumes.c | +12 -2

···
 		return ret;

 	trans = btrfs_start_transaction(root, 0);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		btrfs_std_error(root->fs_info, ret);
+		return ret;
+	}

 	lock_chunks(root);
···

 	unset_balance_control(fs_info);
 	ret = del_balance_item(fs_info->tree_root);
-	BUG_ON(ret);
+	if (ret)
+		btrfs_std_error(fs_info, ret);

 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 }
···
 	if (bargs) {
 		memset(bargs, 0, sizeof(*bargs));
 		update_ioctl_balance_args(fs_info, 0, bargs);
+	}
+
+	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
+	    balance_need_close(fs_info)) {
+		__cancel_balance(fs_info);
 	}

 	wake_up(&fs_info->balance_wait_q);