Skip to content

Commit b6d01b6

Browse files
walken-google and sfrothwell
authored and committed
lib/rbtree: avoid generating code twice for the cached versions
As was already noted in rbtree.h, the logic to cache rb_first (or rb_last) can easily be implemented externally to the core rbtree api. Change the implementation to do just that. Previously the update of rb_leftmost was wired deeper into the implmentation, but there were some disadvantages to that - mostly, lib/rbtree.c had separate instantiations for rb_insert_color() vs rb_insert_color_cached(), as well as rb_erase() vs rb_erase_cached(), which were doing exactly the same thing save for the rb_leftmost update at the start of either function. text data bss dec hex filename 5405 120 0 5525 1595 lib/rbtree.o-vanilla 3827 96 0 3923 f53 lib/rbtree.o-patch [[email protected]: changelog addition] Link: http://lkml.kernel.org/r/20190628171416.by5gdizl3rcxk5h5@linux-r8p5 Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Michel Lespinasse <[email protected]> Acked-by: Davidlohr Bueso <[email protected]> Acked-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Stephen Rothwell <[email protected]>
1 parent 6ea9215 commit b6d01b6

File tree

3 files changed

+59
-78
lines changed

3 files changed

+59
-78
lines changed

include/linux/rbtree.h

Lines changed: 46 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -32,25 +32,9 @@ struct rb_root {
3232
struct rb_node *rb_node;
3333
};
3434

35-
/*
36-
* Leftmost-cached rbtrees.
37-
*
38-
* We do not cache the rightmost node based on footprint
39-
* size vs number of potential users that could benefit
40-
* from O(1) rb_last(). Just not worth it, users that want
41-
* this feature can always implement the logic explicitly.
42-
* Furthermore, users that want to cache both pointers may
43-
* find it a bit asymmetric, but that's ok.
44-
*/
45-
struct rb_root_cached {
46-
struct rb_root rb_root;
47-
struct rb_node *rb_leftmost;
48-
};
49-
5035
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
5136

5237
#define RB_ROOT (struct rb_root) { NULL, }
53-
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
5438
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
5539

5640
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
7256
extern struct rb_node *rb_first(const struct rb_root *);
7357
extern struct rb_node *rb_last(const struct rb_root *);
7458

75-
extern void rb_insert_color_cached(struct rb_node *,
76-
struct rb_root_cached *, bool);
77-
extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
78-
/* Same as rb_first(), but O(1) */
79-
#define rb_first_cached(root) (root)->rb_leftmost
80-
8159
/* Postorder iteration - always visit the parent after its children */
8260
extern struct rb_node *rb_first_postorder(const struct rb_root *);
8361
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
8765
struct rb_root *root);
8866
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
8967
struct rb_root *root);
90-
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
91-
struct rb_root_cached *root);
9268

9369
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
9470
struct rb_node **rb_link)
@@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
136112
typeof(*pos), field); 1; }); \
137113
pos = n)
138114

115+
/*
116+
* Leftmost-cached rbtrees.
117+
*
118+
* We do not cache the rightmost node based on footprint
119+
* size vs number of potential users that could benefit
120+
* from O(1) rb_last(). Just not worth it, users that want
121+
* this feature can always implement the logic explicitly.
122+
* Furthermore, users that want to cache both pointers may
123+
* find it a bit asymmetric, but that's ok.
124+
*/
125+
struct rb_root_cached {
126+
struct rb_root rb_root;
127+
struct rb_node *rb_leftmost;
128+
};
129+
130+
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
131+
132+
/* Same as rb_first(), but O(1) */
133+
#define rb_first_cached(root) (root)->rb_leftmost
134+
135+
static inline void rb_insert_color_cached(struct rb_node *node,
136+
struct rb_root_cached *root,
137+
bool leftmost)
138+
{
139+
if (leftmost)
140+
root->rb_leftmost = node;
141+
rb_insert_color(node, &root->rb_root);
142+
}
143+
144+
static inline void rb_erase_cached(struct rb_node *node,
145+
struct rb_root_cached *root)
146+
{
147+
if (root->rb_leftmost == node)
148+
root->rb_leftmost = rb_next(node);
149+
rb_erase(node, &root->rb_root);
150+
}
151+
152+
static inline void rb_replace_node_cached(struct rb_node *victim,
153+
struct rb_node *new,
154+
struct rb_root_cached *root)
155+
{
156+
if (root->rb_leftmost == victim)
157+
root->rb_leftmost = new;
158+
rb_replace_node(victim, new, &root->rb_root);
159+
}
160+
139161
#endif /* _LINUX_RBTREE_H */

include/linux/rbtree_augmented.h

Lines changed: 10 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -30,10 +30,9 @@ struct rb_augment_callbacks {
3030
void (*rotate)(struct rb_node *old, struct rb_node *new);
3131
};
3232

33-
extern void __rb_insert_augmented(struct rb_node *node,
34-
struct rb_root *root,
35-
bool newleft, struct rb_node **leftmost,
33+
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
3634
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
35+
3736
/*
3837
* Fixup the rbtree and update the augmented information when rebalancing.
3938
*
@@ -48,16 +47,17 @@ static inline void
4847
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
4948
const struct rb_augment_callbacks *augment)
5049
{
51-
__rb_insert_augmented(node, root, false, NULL, augment->rotate);
50+
__rb_insert_augmented(node, root, augment->rotate);
5251
}
5352

5453
static inline void
5554
rb_insert_augmented_cached(struct rb_node *node,
5655
struct rb_root_cached *root, bool newleft,
5756
const struct rb_augment_callbacks *augment)
5857
{
59-
__rb_insert_augmented(node, &root->rb_root,
60-
newleft, &root->rb_leftmost, augment->rotate);
58+
if (newleft)
59+
root->rb_leftmost = node;
60+
rb_insert_augmented(node, &root->rb_root, augment);
6161
}
6262

6363
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@@ -150,17 +150,13 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
150150

151151
static __always_inline struct rb_node *
152152
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
153-
struct rb_node **leftmost,
154153
const struct rb_augment_callbacks *augment)
155154
{
156155
struct rb_node *child = node->rb_right;
157156
struct rb_node *tmp = node->rb_left;
158157
struct rb_node *parent, *rebalance;
159158
unsigned long pc;
160159

161-
if (leftmost && node == *leftmost)
162-
*leftmost = rb_next(node);
163-
164160
if (!tmp) {
165161
/*
166162
* Case 1: node to erase has no more than 1 child (easy!)
@@ -260,8 +256,7 @@ static __always_inline void
260256
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
261257
const struct rb_augment_callbacks *augment)
262258
{
263-
struct rb_node *rebalance = __rb_erase_augmented(node, root,
264-
NULL, augment);
259+
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
265260
if (rebalance)
266261
__rb_erase_color(rebalance, root, augment->rotate);
267262
}
@@ -270,11 +265,9 @@ static __always_inline void
270265
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
271266
const struct rb_augment_callbacks *augment)
272267
{
273-
struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
274-
&root->rb_leftmost,
275-
augment);
276-
if (rebalance)
277-
__rb_erase_color(rebalance, &root->rb_root, augment->rotate);
268+
if (root->rb_leftmost == node)
269+
root->rb_leftmost = rb_next(node);
270+
rb_erase_augmented(node, &root->rb_root, augment);
278271
}
279272

280273
#endif /* _LINUX_RBTREE_AUGMENTED_H */

lib/rbtree.c

Lines changed: 3 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
8383

8484
static __always_inline void
8585
__rb_insert(struct rb_node *node, struct rb_root *root,
86-
bool newleft, struct rb_node **leftmost,
8786
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
8887
{
8988
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
9089

91-
if (newleft)
92-
*leftmost = node;
93-
9490
while (true) {
9591
/*
9692
* Loop invariant: node is red.
@@ -437,38 +433,19 @@ static const struct rb_augment_callbacks dummy_callbacks = {
437433

438434
void rb_insert_color(struct rb_node *node, struct rb_root *root)
439435
{
440-
__rb_insert(node, root, false, NULL, dummy_rotate);
436+
__rb_insert(node, root, dummy_rotate);
441437
}
442438
EXPORT_SYMBOL(rb_insert_color);
443439

444440
void rb_erase(struct rb_node *node, struct rb_root *root)
445441
{
446442
struct rb_node *rebalance;
447-
rebalance = __rb_erase_augmented(node, root,
448-
NULL, &dummy_callbacks);
443+
rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
449444
if (rebalance)
450445
____rb_erase_color(rebalance, root, dummy_rotate);
451446
}
452447
EXPORT_SYMBOL(rb_erase);
453448

454-
void rb_insert_color_cached(struct rb_node *node,
455-
struct rb_root_cached *root, bool leftmost)
456-
{
457-
__rb_insert(node, &root->rb_root, leftmost,
458-
&root->rb_leftmost, dummy_rotate);
459-
}
460-
EXPORT_SYMBOL(rb_insert_color_cached);
461-
462-
void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
463-
{
464-
struct rb_node *rebalance;
465-
rebalance = __rb_erase_augmented(node, &root->rb_root,
466-
&root->rb_leftmost, &dummy_callbacks);
467-
if (rebalance)
468-
____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
469-
}
470-
EXPORT_SYMBOL(rb_erase_cached);
471-
472449
/*
473450
* Augmented rbtree manipulation functions.
474451
*
@@ -477,10 +454,9 @@ EXPORT_SYMBOL(rb_erase_cached);
477454
*/
478455

479456
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
480-
bool newleft, struct rb_node **leftmost,
481457
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
482458
{
483-
__rb_insert(node, root, newleft, leftmost, augment_rotate);
459+
__rb_insert(node, root, augment_rotate);
484460
}
485461
EXPORT_SYMBOL(__rb_insert_augmented);
486462

@@ -591,16 +567,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
591567
}
592568
EXPORT_SYMBOL(rb_replace_node);
593569

594-
void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
595-
struct rb_root_cached *root)
596-
{
597-
rb_replace_node(victim, new, &root->rb_root);
598-
599-
if (root->rb_leftmost == victim)
600-
root->rb_leftmost = new;
601-
}
602-
EXPORT_SYMBOL(rb_replace_node_cached);
603-
604570
void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
605571
struct rb_root *root)
606572
{

0 commit comments

Comments
 (0)