
Commit e9b60c7

Dave Chinner authored and Darrick J. Wong committed
pcpcntr: remove percpu_counter_sum_all()
percpu_counter_sum_all() is now redundant, as the race condition it was invented to handle is now dealt with by percpu_counter_sum() directly, and all users of percpu_counter_sum_all() have been removed. Remove it.

This effectively reverts the changes made in f689054 ("percpu_counter: add percpu_counter_sum_all interface"), except for the cpumask-iteration fix to percpu_counter_sum() made earlier in this series.

Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
1 parent 7ba85fb commit e9b60c7
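For orientation (this context is mine, not part of the commit): a percpu_counter keeps a global s64 in fbc->count plus a small s32 delta per CPU, and percpu_counter_sum() is the slow, accurate read path that folds the deltas under fbc->lock. A minimal sketch of a hypothetical in-kernel user follows; the widget counter and wrapper functions are illustrative, only the percpu_counter_* calls are real API.

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;	/* hypothetical example counter */

static int widgets_setup(void)
{
	/* Allocates the per-cpu deltas; the counter starts at 0. */
	return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
}

static void widget_created(void)
{
	/* Fast path: bumps this CPU's s32 delta, batched into fbc->count. */
	percpu_counter_inc(&nr_widgets);
}

static s64 widgets_estimate(void)
{
	/* Cheap but possibly stale: reads fbc->count only. */
	return percpu_counter_read(&nr_widgets);
}

static s64 widgets_exact(void)
{
	/*
	 * Accurate but slow: takes fbc->lock and folds every CPU's delta.
	 * After this series it walks cpu_online_mask | cpu_dying_mask, so
	 * a racing CPU offline cannot hide a delta mid-sum.
	 */
	return percpu_counter_sum(&nr_widgets);
}

static void widgets_teardown(void)
{
	percpu_counter_destroy(&nr_widgets);
}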

File tree

2 files changed: +11 −35 lines changed


include/linux/percpu_counter.h

Lines changed: 0 additions & 6 deletions
@@ -45,7 +45,6 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
 			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-s64 percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
 
@@ -196,11 +195,6 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 	return percpu_counter_read(fbc);
 }
 
-static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-	return percpu_counter_read(fbc);
-}
-
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
 	return true;
lib/percpu_counter.c

Lines changed: 11 additions & 29 deletions
@@ -122,23 +122,6 @@ void percpu_counter_sync(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
-			const struct cpumask *cpu_mask)
-{
-	s64 ret;
-	int cpu;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&fbc->lock, flags);
-	ret = fbc->count;
-	for_each_cpu_or(cpu, cpu_online_mask, cpu_mask) {
-		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
-		ret += *pcount;
-	}
-	raw_spin_unlock_irqrestore(&fbc->lock, flags);
-	return ret;
-}
-
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive().
@@ -153,22 +136,21 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
  */
 s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
+	s64 ret;
+	int cpu;
+	unsigned long flags;
 
-	return __percpu_counter_sum_mask(fbc, cpu_dying_mask);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	ret = fbc->count;
+	for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-/*
- * This is slower version of percpu_counter_sum as it traverses all possible
- * cpus. Use this only in the cases where accurate data is needed in the
- * presense of CPUs getting offlined.
- */
-s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
-}
-EXPORT_SYMBOL(percpu_counter_sum_all);
-
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key)
 {
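To make the race reasoning concrete, here is a toy single-threaded user-space model (my construction, not from the kernel): flag arrays stand in for cpu_online_mask and cpu_dying_mask, and folding is ordered before the dying flag clears, mirroring the ordering the series relies on. A CPU that has left the online mask but whose delta has not yet been folded is still visible via the dying mask, so the sum never transiently drops.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static int64_t global_count;			/* models fbc->count */
static int32_t pcpu[NR_CPUS];			/* models the per-cpu s32 deltas */
static uint8_t online[NR_CPUS] = { 1, 1, 1, 1 };	/* models cpu_online_mask */
static uint8_t dying[NR_CPUS];			/* models cpu_dying_mask */

/* Models percpu_counter_sum() after this series: walk online | dying. */
static int64_t sum(void)
{
	int64_t ret = global_count;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (online[cpu] || dying[cpu])
			ret += pcpu[cpu];
	return ret;
}

int main(void)
{
	pcpu[2] = 7;	/* CPU 2 holds an unfolded delta */

	/* Hot-unplug of CPU 2 begins: it enters dying, then leaves online. */
	dying[2] = 1;
	online[2] = 0;
	printf("mid-offline sum = %lld\n", (long long)sum());	/* 7: delta still seen */

	/* The hotplug "dead" callback folds the delta, then dying clears. */
	global_count += pcpu[2];
	pcpu[2] = 0;
	dying[2] = 0;
	printf("post-offline sum = %lld\n", (long long)sum());	/* still 7 */
	return 0;
}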
