
Commit a79e0e7

Zqiang authored and Neeraj Upadhyay committed
rcu-tasks: Fix access non-existent percpu rtpcp variable in rcu_tasks_need_gpcb()
For kernels built with CONFIG_FORCE_NR_CPUS=y, nr_cpu_ids is defined as NR_CPUS rather than the number of possible CPUs, which can cause the following system panic:

  smpboot: Allowing 4 CPUs, 0 hotplug CPUs
  ...
  setup_percpu: NR_CPUS:512 nr_cpumask_bits:512 nr_cpu_ids:512 nr_node_ids:1
  ...
  BUG: unable to handle page fault for address: ffffffff9911c8c8
  Oops: 0000 [#1] PREEMPT SMP PTI
  CPU: 0 PID: 15 Comm: rcu_tasks_trace Tainted: G W 6.6.21 #1 5dc7acf91a5e8e9ac9dcfc35bee0245691283ea6
  RIP: 0010:rcu_tasks_need_gpcb+0x25d/0x2c0
  RSP: 0018:ffffa371c00a3e60 EFLAGS: 00010082
  CR2: ffffffff9911c8c8 CR3: 000000040fa20005 CR4: 00000000001706f0
  Call Trace:
   <TASK>
   ? __die+0x23/0x80
   ? page_fault_oops+0xa4/0x180
   ? exc_page_fault+0x152/0x180
   ? asm_exc_page_fault+0x26/0x40
   ? rcu_tasks_need_gpcb+0x25d/0x2c0
   ? __pfx_rcu_tasks_kthread+0x40/0x40
   rcu_tasks_one_gp+0x69/0x180
   rcu_tasks_kthread+0x94/0xc0
   kthread+0xe8/0x140
   ? __pfx_kthread+0x40/0x40
   ret_from_fork+0x34/0x80
   ? __pfx_kthread+0x40/0x40
   ret_from_fork_asm+0x1b/0x80
   </TASK>

Considering that there may be holes in the CPU numbers, use the maximum possible CPU number, instead of nr_cpu_ids, for configuring enqueue and dequeue limits.

Closes: https://lore.kernel.org/linux-input/CALMA0xaTSMN+p4xUXkzrtR5r6k7hgoswcaXx7baR_z9r5jjskw@mail.gmail.com/T/#u
Reported-by: Zhixu Liu <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Signed-off-by: Neeraj Upadhyay <[email protected]>
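Not part of the patch: a minimal userspace sketch of the arithmetic behind the bug, using assumed numbers taken from the log above (NR_CPUS=512 from the config, four possible CPUs at boot). It only models why a limit derived from nr_cpu_ids overshoots the per-CPU areas that actually exist; the real code operates on cpu_possible_mask and per-CPU storage.

/* Illustrative userspace model, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 512     /* with CONFIG_FORCE_NR_CPUS=y, nr_cpu_ids == NR_CPUS */

/* Hypothetical possible-CPU map: only CPUs 0-3 exist. */
static const bool possible[NR_CPUS] = { [0] = true, [1] = true, [2] = true, [3] = true };

int main(void)
{
        int nr_cpu_ids = NR_CPUS;       /* the bound the old code used */
        int maxcpu = -1;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (possible[cpu])
                        maxcpu = cpu;   /* mirrors "maxcpu = cpu" in cblist_init_generic() */

        int rcu_task_cpu_ids = maxcpu + 1;      /* the bound the patch switches to */

        printf("old upper bound: %d (would touch %d per-CPU slots that do not exist)\n",
               nr_cpu_ids, nr_cpu_ids - rcu_task_cpu_ids);
        printf("new upper bound: %d\n", rcu_task_cpu_ids);
        return 0;
}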
1 parent a6b6096 commit a79e0e7

File tree

1 file changed: +51, -29 lines


kernel/rcu/tasks.h

Lines changed: 51 additions & 29 deletions
@@ -49,6 +49,7 @@ struct rcu_tasks_percpu {
 	struct list_head rtp_blkd_tasks;
 	struct list_head rtp_exit_list;
 	int cpu;
+	int index;
 	struct rcu_tasks *rtpp;
 };

@@ -110,6 +111,7 @@ struct rcu_tasks {
 	call_rcu_func_t call_func;
 	unsigned int wait_state;
 	struct rcu_tasks_percpu __percpu *rtpcpu;
+	struct rcu_tasks_percpu **rtpcp_array;
 	int percpu_enqueue_shift;
 	int percpu_enqueue_lim;
 	int percpu_dequeue_lim;
@@ -182,6 +184,8 @@ module_param(rcu_task_collapse_lim, int, 0444);
 static int rcu_task_lazy_lim __read_mostly = 32;
 module_param(rcu_task_lazy_lim, int, 0444);

+static int rcu_task_cpu_ids;
+
 /* RCU tasks grace-period state for debugging. */
 #define RTGS_INIT 0
 #define RTGS_WAIT_WAIT_CBS 1
@@ -245,6 +249,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 	int cpu;
 	int lim;
 	int shift;
+	int maxcpu;
+	int index = 0;

 	if (rcu_task_enqueue_lim < 0) {
 		rcu_task_enqueue_lim = 1;
@@ -254,14 +260,9 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 	}
 	lim = rcu_task_enqueue_lim;

-	if (lim > nr_cpu_ids)
-		lim = nr_cpu_ids;
-	shift = ilog2(nr_cpu_ids / lim);
-	if (((nr_cpu_ids - 1) >> shift) >= lim)
-		shift++;
-	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
-	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
-	smp_store_release(&rtp->percpu_enqueue_lim, lim);
+	rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
+	BUG_ON(!rtp->rtpcp_array);
+
 	for_each_possible_cpu(cpu) {
 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

@@ -273,14 +274,29 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
 		rtpcp->cpu = cpu;
 		rtpcp->rtpp = rtp;
+		rtpcp->index = index;
+		rtp->rtpcp_array[index] = rtpcp;
+		index++;
 		if (!rtpcp->rtp_blkd_tasks.next)
 			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
 		if (!rtpcp->rtp_exit_list.next)
 			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
+		maxcpu = cpu;
 	}

-	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
-			data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
+	rcu_task_cpu_ids = maxcpu + 1;
+	if (lim > rcu_task_cpu_ids)
+		lim = rcu_task_cpu_ids;
+	shift = ilog2(rcu_task_cpu_ids / lim);
+	if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
+		shift++;
+	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
+	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
+	smp_store_release(&rtp->percpu_enqueue_lim, lim);
+
+	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
+			rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
+			rcu_task_cb_adjust, rcu_task_cpu_ids);
 }

 // Compute wakeup time for lazy callback timer.
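A hedged, standalone model of the recomputed enqueue shift above, with assumed values (rcu_task_cpu_ids = 4, rcu_task_enqueue_lim = 2); ilog2() here is a plain helper standing in for the kernel macro. The point of the "shift++" correction is that every CPU number below rcu_task_cpu_ids, shifted right by the chosen amount, lands on a queue index below lim:

/* Userspace sketch of the shift computation in cblist_init_generic(). */
#include <stdio.h>

static int ilog2(unsigned int v)        /* stand-in for the kernel's ilog2() */
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        int rcu_task_cpu_ids = 4;       /* assumed: highest possible CPU is 3 */
        int lim = 2;                    /* assumed rcu_task_enqueue_lim */
        int shift;

        if (lim > rcu_task_cpu_ids)
                lim = rcu_task_cpu_ids;
        shift = ilog2(rcu_task_cpu_ids / lim);
        if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
                shift++;

        /* Every CPU number below rcu_task_cpu_ids must map to a queue < lim. */
        for (int cpu = 0; cpu < rcu_task_cpu_ids; cpu++)
                printf("cpu %d -> queue %d (lim %d, shift %d)\n", cpu, cpu >> shift, lim, shift);
        return 0;
}

With these numbers, shift comes out as 1, so CPUs 0-1 target queue 0 and CPUs 2-3 target queue 1, all below lim.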
@@ -348,7 +364,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 			rtpcp->rtp_n_lock_retries = 0;
 		}
 		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
-		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
+		    READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
 			needadjust = true;  // Defer adjustment to avoid deadlock.
 	}
 	// Queuing callbacks before initialization not yet supported.
@@ -368,10 +384,10 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 	if (unlikely(needadjust)) {
 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
-		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
+		if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
 			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
-			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
-			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
+			WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
+			smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
 			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
 		}
 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
@@ -444,6 +460,8 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)

 	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
 	for (cpu = 0; cpu < dequeue_limit; cpu++) {
+		if (!cpu_possible(cpu))
+			continue;
 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

 		/* Advance and accelerate any new callbacks. */
@@ -481,7 +499,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 		if (rtp->percpu_enqueue_lim > 1) {
-			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
+			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
 			gpdone = false;
@@ -496,7 +514,9 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 		pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
 	}
 	if (rtp->percpu_dequeue_lim == 1) {
-		for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
+		for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
+			if (!cpu_possible(cpu))
+				continue;
 			struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

 			WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
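Both dequeue-side loops above now run up to rcu_task_cpu_ids (the highest possible CPU number plus one), and because sparse numbering can leave holes below that bound they skip CPU numbers that are not possible. A tiny userspace sketch of that pattern, with an invented possible[] map (hole at CPU 2) standing in for cpu_possible():

/* Userspace sketch of the dequeue-side loop bound; the map and hole are assumptions. */
#include <stdio.h>
#include <stdbool.h>

static const bool possible[] = { true, true, false, true };     /* hypothetical sparse numbering */
static const int rcu_task_cpu_ids = 4;                          /* maxcpu (3) + 1 */

int main(void)
{
        for (int cpu = 0; cpu < rcu_task_cpu_ids; cpu++) {
                if (!possible[cpu])
                        continue;       /* mirrors the new cpu_possible() check */
                printf("scan callbacks for cpu %d\n", cpu);
        }
        return 0;
}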
@@ -511,30 +531,32 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 // Advance callbacks and invoke any that are ready.
 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
 {
-	int cpu;
-	int cpunext;
 	int cpuwq;
 	unsigned long flags;
 	int len;
+	int index;
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	struct rcu_tasks_percpu *rtpcp_next;

-	cpu = rtpcp->cpu;
-	cpunext = cpu * 2 + 1;
-	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
-		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
-		cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
-		queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
-		cpunext++;
-		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
-			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
-			cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
+	index = rtpcp->index * 2 + 1;
+	if (index < num_possible_cpus()) {
+		rtpcp_next = rtp->rtpcp_array[index];
+		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+			cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
 			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+			index++;
+			if (index < num_possible_cpus()) {
+				rtpcp_next = rtp->rtpcp_array[index];
+				if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+					cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
+					queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+				}
+			}
 		}
 	}

-	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
+	if (rcu_segcblist_empty(&rtpcp->cblist))
 		return;
 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
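The callback-invocation fan-out above no longer walks raw CPU numbers: each rcu_tasks_percpu carries a dense index into rtp->rtpcp_array, and the children of entry i are i*2+1 and i*2+2, so every entry reached corresponds to a possible CPU (which is why the !cpu_possible(cpu) early return could be dropped). A small userspace sketch of that index tree, assuming four possible CPUs with sparse numbers; the kernel queues per-CPU work items where this sketch simply recurses:

/* Sketch of the dense-index fan-out; the CPU numbering below is hypothetical. */
#include <stdio.h>

static const int index_to_cpu[] = { 0, 1, 3, 4 };       /* sparse CPUs packed into indexes 0..3 */
#define NPOSSIBLE 4                                     /* plays the role of num_possible_cpus() */

static void fan_out(int index)
{
        printf("invoke callbacks for cpu %d (index %d)\n", index_to_cpu[index], index);
        /* Children of entry i are i*2+1 and i*2+2; the kernel additionally checks
         * rtpcp_next->cpu against percpu_dequeue_lim before queueing work. */
        for (int child = index * 2 + 1; child <= index * 2 + 2 && child < NPOSSIBLE; child++)
                fan_out(child);
}

int main(void)
{
        fan_out(0);     /* the grace-period kthread starts the fan-out at index 0 */
        return 0;
}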
