@@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
 	if (unlikely(!trans->srcu_held))
 		bch2_trans_srcu_lock(trans);
 
+	trace_btree_path_traverse_start(trans, path);
+
 	/*
 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
 	 * and re-traverse the path without a transaction restart:
@@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
 
 out_uptodate:
 	path->uptodate = BTREE_ITER_UPTODATE;
+	trace_btree_path_traverse_end(trans, path);
 out:
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
 		panic("ret %s (%i) trans->restarted %s (%i)\n",
@@ -1236,8 +1239,10 @@ __flatten
 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
 			 btree_path_idx_t path, bool intent, unsigned long ip)
 {
+	struct btree_path *old = trans->paths + path;
 	__btree_path_put(trans, trans->paths + path, intent);
 	path = btree_path_clone(trans, path, intent, ip);
+	trace_btree_path_clone(trans, old, trans->paths + path);
 	trans->paths[path].preserve = false;
 	return path;
 }
@@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
 	bch2_trans_verify_not_in_restart(trans);
 	EBUG_ON(!trans->paths[path_idx].ref);
 
+	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
+
 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
 
 	struct btree_path *path = trans->paths + path_idx;
@@ -1368,6 +1375,8 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 		? have_path_at_pos(trans, path)
 		: have_node_at_pos(trans, path);
 
+	trace_btree_path_free(trans, path_idx, dup);
+
 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
 		return;
 
@@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
 noinline __cold
 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 {
-	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
-		   trans->fn, trans->journal_res.seq);
+	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
+		   trans->nr_updates, trans->fn, trans->journal_res.seq);
 	printbuf_indent_add(buf, 2);
 
 	trans_for_each_update(trans, i) {
@@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
 {
 	struct btree_path *path = trans->paths + path_idx;
 
-	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
+	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
 		   path_idx, path->ref, path->intent_ref,
 		   path->preserve ? 'P' : ' ',
 		   path->should_be_locked ? 'S' : ' ',
@@ -1716,6 +1725,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
 	    trans->paths[path_pos].cached	== cached &&
 	    trans->paths[path_pos].btree_id	== btree_id &&
 	    trans->paths[path_pos].level	== level) {
+		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
+
 		__btree_path_get(trans, trans->paths + path_pos, intent);
 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
 		path = trans->paths + path_idx;
@@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
 		path->ip_allocated = ip;
 #endif
 		trans->paths_sorted = false;
+
+		trace_btree_path_alloc(trans, path);
 	}
 
 	if (!(flags & BTREE_ITER_nopreserve))
@@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
 
 	struct btree_path *path = btree_iter_path(trans, iter);
 	if (btree_path_node(path, path->level))
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
 	return 0;
 }
 
@@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
@@ -1983,7 +1996,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -2155,7 +2168,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
 	if (unlikely(ret))
 		return bkey_s_c_err(ret);
 
-	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
 
 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
 	if (k.k && !bkey_err(k)) {
@@ -2199,7 +2212,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 			goto out;
 		}
 
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
 
 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
 
@@ -2382,14 +2395,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 				iter->flags & BTREE_ITER_intent,
 				btree_iter_ip_allocated(iter));
 
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out_no_locked:
 	if (iter->update_path) {
 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
 		if (unlikely(ret))
 			k = bkey_s_c_err(ret);
 		else
-			btree_path_set_should_be_locked(trans->paths + iter->update_path);
+			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
 	}
 
 	if (!(iter->flags & BTREE_ITER_all_snapshots))
@@ -2511,6 +2524,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 						iter->flags & BTREE_ITER_intent,
 						_THIS_IP_);
 				path = btree_iter_path(trans, iter);
+				trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
 				saved_k = *k.k;
 				saved_v = k.v;
 			}
@@ -2527,7 +2541,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 				continue;
 			}
 
-			btree_path_set_should_be_locked(path);
+			btree_path_set_should_be_locked(trans, path);
 			break;
 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
 			/* Advance to previous leaf node: */
@@ -2685,7 +2699,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		}
 	}
 out:
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
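
The diff above only adds the call sites; the trace_btree_path_* events themselves are declared elsewhere in the bcachefs tree (its tracepoint header, fs/bcachefs/trace.h). As a rough, hypothetical sketch of what declaring one such event involves with the kernel's TRACE_EVENT() machinery, the fragment below defines an example event. The event name, recorded fields, and format string are illustrative assumptions, not the actual bcachefs definitions, and the header assumes the bcachefs btree_trans/btree_path types are visible via the usual includes.

/*
 * Hypothetical sketch only -- not the real fs/bcachefs/trace.h definition.
 * A tracepoint header like this is included with the bcachefs types in scope.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BTREE_PATH_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BTREE_PATH_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(btree_path_alloc_example,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,	idx)		/* index of path in trans->paths */
		__field(u8,	btree_id)
		__field(u8,	level)
		__field(u64,	pos_inode)
		__field(u64,	pos_offset)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->btree_id	= path->btree_id;
		__entry->level		= path->level;
		__entry->pos_inode	= path->pos.inode;
		__entry->pos_offset	= path->pos.offset;
	),

	TP_printk("path %u btree %u level %u pos %llu:%llu",
		  __entry->idx, __entry->btree_id, __entry->level,
		  __entry->pos_inode, __entry->pos_offset)
);

#endif /* _TRACE_BTREE_PATH_EXAMPLE_H */

/* This part must sit outside the include-guard protection. */
#include <trace/define_trace.h>

Assuming the real events are registered under the bcachefs trace system as sketched here, they can be toggled at runtime under /sys/kernel/tracing/events/bcachefs/ like any other tracepoint, so the new call sites stay cheap no-ops while tracing is disabled.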