@@ -135,31 +135,35 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
 #endif
 
 /* Caller should verify both ref and tag to be valid */
-static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 {
 	alloc_tag_add_check(ref, tag);
 	if (!ref || !tag)
-		return;
+		return false;
 
 	ref->ct = &tag->ct;
+	return true;
 }
 
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 {
-	__alloc_tag_ref_set(ref, tag);
+	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
+		return false;
+
 	/*
 	 * We need in increment the call counter every time we have a new
 	 * allocation or when we split a large allocation into smaller ones.
 	 * Each new reference for every sub-allocation needs to increment call
 	 * counter because when we free each part the counter will be decremented.
 	 */
 	this_cpu_inc(tag->counters->calls);
+	return true;
 }
 
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
 {
-	alloc_tag_ref_set(ref, tag);
-	this_cpu_add(tag->counters->bytes, bytes);
+	if (likely(alloc_tag_ref_set(ref, tag)))
+		this_cpu_add(tag->counters->bytes, bytes);
 }
 
 static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
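
The hunk changes the ref-set helpers from void to bool so that callers can skip the per-CPU counter updates when either the reference or the tag is missing. The following is a minimal userspace sketch of that propagation pattern only, under stated assumptions: fake_tag, fake_ref, fake_ref_set and fake_add are hypothetical stand-ins invented for illustration, plain fields replace the kernel's per-CPU counters, and likely/unlikely are reduced to __builtin_expect. It is not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types; plain fields replace
 * the per-CPU counters used by the real alloc_tag code. */
struct fake_tag {
	long calls;
	long bytes;
};

struct fake_ref {
	struct fake_tag *tag;
};

#define unlikely(x) __builtin_expect(!!(x), 0)
#define likely(x)   __builtin_expect(!!(x), 1)

/* Mirrors the bool-returning helper: refuse to set the reference when
 * either pointer is missing and report that outcome to the caller. */
static bool fake_ref_set(struct fake_ref *ref, struct fake_tag *tag)
{
	if (unlikely(!ref || !tag))
		return false;

	ref->tag = tag;
	tag->calls++;		/* one call counted per new reference */
	return true;
}

/* Mirrors the alloc_tag_add() shape: account bytes only if the
 * reference was actually set. */
static void fake_add(struct fake_ref *ref, struct fake_tag *tag, size_t bytes)
{
	if (likely(fake_ref_set(ref, tag)))
		tag->bytes += bytes;
}

int main(void)
{
	struct fake_tag tag = { 0, 0 };
	struct fake_ref ref = { NULL };

	fake_add(&ref, &tag, 128);	/* accounted: calls=1, bytes=128 */
	fake_add(NULL, &tag, 64);	/* skipped: missing ref leaves counters unchanged */

	printf("calls=%ld bytes=%ld\n", tag.calls, tag.bytes);
	return 0;
}

The point of the pattern is that the byte counter is never incremented for a reference that was never recorded, so the later decrement on free cannot underflow the accounting.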