@@ -143,6 +143,34 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
 	return NULL;
 }
 
+static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
+		struct btrfs_delayed_ref_node *ins)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *node = &ins->ref_node;
+	struct rb_node *parent_node = NULL;
+	struct btrfs_delayed_ref_node *entry;
+
+	while (*p) {
+		int comp;
+
+		parent_node = *p;
+		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
+				 ref_node);
+		comp = comp_refs(ins, entry, true);
+		if (comp < 0)
+			p = &(*p)->rb_left;
+		else if (comp > 0)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+
+	rb_link_node(node, parent_node, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
 /*
  * find an head entry based on bytenr. This returns the delayed ref
  * head if it was able to find one, or NULL if nothing was in that spot.
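Aside (not part of the patch): tree_insert() above is the standard kernel rbtree insert-or-find walk, with comp_refs() providing the ordering; it returns the already-inserted node on an exact match and NULL after linking and rebalancing a new one. A minimal sketch of the same idiom, assuming a hypothetical node type keyed by a plain u64 instead of the delayed-ref comparator:

/*
 * Illustration only -- not part of the patch. Same insert-or-find idiom as
 * tree_insert(), but keyed by a hypothetical u64 instead of comp_refs().
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rb;	/* embedded rbtree linkage */
	u64 key;		/* hypothetical ordering key */
};

/* Returns the existing node on a duplicate key, NULL after a fresh insert. */
static struct demo_node *demo_insert(struct rb_root *root, struct demo_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct demo_node *entry = rb_entry(*p, struct demo_node, rb);

		parent = *p;
		if (ins->key < entry->key)
			p = &(*p)->rb_left;
		else if (ins->key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;	/* collision: caller merges instead */
	}

	rb_link_node(&ins->rb, parent, p);	/* link at the empty leaf slot */
	rb_insert_color(&ins->rb, root);	/* rebalance/recolor the tree */
	return NULL;
}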
@@ -212,7 +240,8 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 				    struct btrfs_delayed_ref_node *ref)
 {
 	assert_spin_locked(&head->lock);
-	list_del(&ref->list);
+	rb_erase(&ref->ref_node, &head->ref_tree);
+	RB_CLEAR_NODE(&ref->ref_node);
 	if (!list_empty(&ref->add_list))
 		list_del(&ref->add_list);
 	ref->in_tree = 0;
@@ -229,24 +258,18 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
 		      u64 seq)
 {
 	struct btrfs_delayed_ref_node *next;
+	struct rb_node *node = rb_next(&ref->ref_node);
 	bool done = false;
 
-	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
-				list);
-	while (!done && &next->list != &head->ref_list) {
+	while (!done && node) {
 		int mod;
-		struct btrfs_delayed_ref_node *next2;
-
-		next2 = list_next_entry(next, list);
-
-		if (next == ref)
-			goto next;
 
+		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+		node = rb_next(node);
 		if (seq && next->seq >= seq)
-			goto next;
-
+			break;
 		if (comp_refs(ref, next, false))
-			goto next;
+			break;
 
 		if (ref->action == next->action) {
 			mod = next->ref_mod;
@@ -270,8 +293,6 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
 			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
 				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
 		}
-next:
-		next = next2;
 	}
 
 	return done;
@@ -283,11 +304,12 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 			      struct btrfs_delayed_ref_head *head)
 {
 	struct btrfs_delayed_ref_node *ref;
+	struct rb_node *node;
 	u64 seq = 0;
 
 	assert_spin_locked(&head->lock);
 
-	if (list_empty(&head->ref_list))
+	if (RB_EMPTY_ROOT(&head->ref_tree))
 		return;
 
 	/* We don't have too many refs to merge for data. */
@@ -304,22 +326,13 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 	}
 	spin_unlock(&fs_info->tree_mod_seq_lock);
 
-	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
-			       list);
-	while (&ref->list != &head->ref_list) {
+again:
+	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 		if (seq && ref->seq >= seq)
-			goto next;
-
-		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
-			if (list_empty(&head->ref_list))
-				break;
-			ref = list_first_entry(&head->ref_list,
-					       struct btrfs_delayed_ref_node,
-					       list);
 			continue;
-		}
-next:
-		ref = list_next_entry(ref, list);
+		if (merge_ref(trans, delayed_refs, head, ref, seq))
+			goto again;
 	}
 }
 
@@ -402,25 +415,19 @@ btrfs_select_ref_head(struct btrfs_trans_handle *trans)
  * Return 0 for insert.
  * Return >0 for merge.
  */
-static int
-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
-			   struct btrfs_delayed_ref_root *root,
-			   struct btrfs_delayed_ref_head *href,
-			   struct btrfs_delayed_ref_node *ref)
+static int insert_delayed_ref(struct btrfs_trans_handle *trans,
+			      struct btrfs_delayed_ref_root *root,
+			      struct btrfs_delayed_ref_head *href,
+			      struct btrfs_delayed_ref_node *ref)
 {
 	struct btrfs_delayed_ref_node *exist;
 	int mod;
 	int ret = 0;
 
 	spin_lock(&href->lock);
-	/* Check whether we can merge the tail node with ref */
-	if (list_empty(&href->ref_list))
-		goto add_tail;
-	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
-			   list);
-	/* No need to compare bytenr nor is_head */
-	if (comp_refs(exist, ref, true))
-		goto add_tail;
+	exist = tree_insert(&href->ref_tree, ref);
+	if (!exist)
+		goto inserted;
 
 	/* Now we are sure we can merge */
 	ret = 1;
@@ -451,9 +458,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
 		drop_delayed_ref(trans, root, href, exist);
 	spin_unlock(&href->lock);
 	return ret;
-
-add_tail:
-	list_add_tail(&ref->list, &href->ref_list);
+inserted:
 	if (ref->action == BTRFS_ADD_DELAYED_REF)
 		list_add_tail(&ref->add_list, &href->ref_add_list);
 	atomic_inc(&root->num_entries);
@@ -593,7 +598,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	head_ref->ref_mod = count_mod;
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
-	INIT_LIST_HEAD(&head_ref->ref_list);
+	head_ref->ref_tree = RB_ROOT;
 	INIT_LIST_HEAD(&head_ref->ref_add_list);
 	RB_CLEAR_NODE(&head_ref->href_node);
 	head_ref->processing = 0;
@@ -685,7 +690,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	ref->is_head = 0;
 	ref->in_tree = 1;
 	ref->seq = seq;
-	INIT_LIST_HEAD(&ref->list);
+	RB_CLEAR_NODE(&ref->ref_node);
 	INIT_LIST_HEAD(&ref->add_list);
 
 	full_ref = btrfs_delayed_node_to_tree_ref(ref);
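Aside (not part of the patch): the initialization changes mirror the list versions they replace: RB_ROOT gives the head an empty tree (checked with RB_EMPTY_ROOT() earlier in the patch), and RB_CLEAR_NODE() marks a ref as not yet linked, pairing with the rb_erase()/RB_CLEAR_NODE() in drop_delayed_ref(). A tiny sketch with hypothetical demo types:

/*
 * Illustration only -- not part of the patch. Initialization conventions
 * for the rbtree-based ref storage, with hypothetical demo types.
 */
#include <linux/rbtree.h>

struct demo_head {
	struct rb_root ref_tree;	/* per-head tree of refs */
};

struct demo_ref {
	struct rb_node ref_node;	/* linkage into demo_head::ref_tree */
};

static void demo_init(struct demo_head *head, struct demo_ref *ref)
{
	head->ref_tree = RB_ROOT;	/* empty tree: RB_EMPTY_ROOT() is true */
	RB_CLEAR_NODE(&ref->ref_node);	/* ref not linked into any tree yet */
}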
@@ -699,7 +704,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 
 	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
 
-	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
 
 	/*
 	 * XXX: memory should be freed at the same level allocated.
@@ -742,7 +747,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	ref->is_head = 0;
 	ref->in_tree = 1;
 	ref->seq = seq;
-	INIT_LIST_HEAD(&ref->list);
+	RB_CLEAR_NODE(&ref->ref_node);
 	INIT_LIST_HEAD(&ref->add_list);
 
 	full_ref = btrfs_delayed_node_to_data_ref(ref);
@@ -758,8 +763,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 
 	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
 
-	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
-
+	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
 	if (ret > 0)
 		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
 }