Skip to content

Commit 0ddf992

Browse files
Daniel Wagner authored and 0day robot committed
rcu: Do not call swake_up_all() while holding rnp->lock
By moving the rcu_nocb_gp_cleanup() call out of the rnp->lock protected region we avoid a deadlock as lockdep reported. swake_up_all() is toggling IRQ enable/disable. That means we might start processing soft IRQs. __do_softirq() calls rcu_process_callbacks() which wants to grab rnp->lock. ================================= [ INFO: inconsistent lock state ] 4.2.0-rc5-00025-g9a73ba0 torvalds#136 Not tainted --------------------------------- inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage. rcu_preempt/8 [HC0[0]:SC0[0]:HE1:SE1] takes: (rcu_node_1){+.?...}, at: [<ffffffff811387c7>] rcu_gp_kthread+0xb97/0xeb0 {IN-SOFTIRQ-W} state was registered at: [<ffffffff81109b9f>] __lock_acquire+0xd5f/0x21e0 [<ffffffff8110be0f>] lock_acquire+0xdf/0x2b0 [<ffffffff81841cc9>] _raw_spin_lock_irqsave+0x59/0xa0 [<ffffffff81136991>] rcu_process_callbacks+0x141/0x3c0 [<ffffffff810b1a9d>] __do_softirq+0x14d/0x670 [<ffffffff810b2214>] irq_exit+0x104/0x110 [<ffffffff81844e96>] smp_apic_timer_interrupt+0x46/0x60 [<ffffffff81842e70>] apic_timer_interrupt+0x70/0x80 [<ffffffff810dba66>] rq_attach_root+0xa6/0x100 [<ffffffff810dbc2d>] cpu_attach_domain+0x16d/0x650 [<ffffffff810e4b42>] build_sched_domains+0x942/0xb00 [<ffffffff821777c2>] sched_init_smp+0x509/0x5c1 [<ffffffff821551e3>] kernel_init_freeable+0x172/0x28f [<ffffffff8182cdce>] kernel_init+0xe/0xe0 [<ffffffff8184231f>] ret_from_fork+0x3f/0x70 irq event stamp: 76 hardirqs last enabled at (75): [<ffffffff81841330>] _raw_spin_unlock_irq+0x30/0x60 hardirqs last disabled at (76): [<ffffffff8184116f>] _raw_spin_lock_irq+0x1f/0x90 softirqs last enabled at (0): [<ffffffff810a8df2>] copy_process.part.26+0x602/0x1cf0 softirqs last disabled at (0): [< (null)>] (null) other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(rcu_node_1); <Interrupt> lock(rcu_node_1); *** DEADLOCK *** 1 lock held by rcu_preempt/8: #0: (rcu_node_1){+.?...}, at: [<ffffffff811387c7>] rcu_gp_kthread+0xb97/0xeb0 stack backtrace: CPU: 0
PID: 8 Comm: rcu_preempt Not tainted 4.2.0-rc5-00025-g9a73ba0 torvalds#136 Hardware name: Dell Inc. PowerEdge R820/066N7P, BIOS 2.0.20 01/16/2014 0000000000000000 000000006d7e67d8 ffff881fb081fbd8 ffffffff818379e0 0000000000000000 ffff881fb0812a00 ffff881fb081fc38 ffffffff8110813b 0000000000000000 0000000000000001 ffff881f00000001 ffffffff8102fa4f Call Trace: [<ffffffff818379e0>] dump_stack+0x4f/0x7b [<ffffffff8110813b>] print_usage_bug+0x1db/0x1e0 [<ffffffff8102fa4f>] ? save_stack_trace+0x2f/0x50 [<ffffffff811087ad>] mark_lock+0x66d/0x6e0 [<ffffffff81107790>] ? check_usage_forwards+0x150/0x150 [<ffffffff81108898>] mark_held_locks+0x78/0xa0 [<ffffffff81841330>] ? _raw_spin_unlock_irq+0x30/0x60 [<ffffffff81108a28>] trace_hardirqs_on_caller+0x168/0x220 [<ffffffff81108aed>] trace_hardirqs_on+0xd/0x10 [<ffffffff81841330>] _raw_spin_unlock_irq+0x30/0x60 [<ffffffff810fd1c7>] swake_up_all+0xb7/0xe0 [<ffffffff811386e1>] rcu_gp_kthread+0xab1/0xeb0 [<ffffffff811089bf>] ? trace_hardirqs_on_caller+0xff/0x220 [<ffffffff81841341>] ? _raw_spin_unlock_irq+0x41/0x60 [<ffffffff81137c30>] ? rcu_barrier+0x20/0x20 [<ffffffff810d2014>] kthread+0x104/0x120 [<ffffffff81841330>] ? _raw_spin_unlock_irq+0x30/0x60 [<ffffffff810d1f10>] ? kthread_create_on_node+0x260/0x260 [<ffffffff8184231f>] ret_from_fork+0x3f/0x70 [<ffffffff810d1f10>] ? kthread_create_on_node+0x260/0x260 Signed-off-by: Daniel Wagner <[email protected]> Cc: "Paul E. McKenney" <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Josh Triplett <[email protected]> Cc: Steven Rostedt <[email protected]> Cc: Mathieu Desnoyers <[email protected]> Cc: Lai Jiangshan <[email protected]> Cc: [email protected]
1 parent 57f88b2 commit 0ddf992

File tree

3 files changed

+18
-5
lines changed

3 files changed

+18
-5
lines changed

kernel/rcu/tree.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1568,7 +1568,6 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
15681568
int needmore;
15691569
struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
15701570

1571-
rcu_nocb_gp_cleanup(rsp, rnp);
15721571
rnp->need_future_gp[c & 0x1] = 0;
15731572
needmore = rnp->need_future_gp[(c + 1) & 0x1];
15741573
trace_rcu_future_gp(rnp, rdp, c,
@@ -1972,6 +1971,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
19721971
int nocb = 0;
19731972
struct rcu_data *rdp;
19741973
struct rcu_node *rnp = rcu_get_root(rsp);
1974+
struct swait_queue_head *sq;
19751975

19761976
WRITE_ONCE(rsp->gp_activity, jiffies);
19771977
raw_spin_lock_irq(&rnp->lock);
@@ -2010,7 +2010,9 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
20102010
needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
20112011
/* smp_mb() provided by prior unlock-lock pair. */
20122012
nocb += rcu_future_gp_cleanup(rsp, rnp);
2013+
sq = rcu_nocb_gp_get(rnp);
20132014
raw_spin_unlock_irq(&rnp->lock);
2015+
rcu_nocb_gp_cleanup(sq);
20142016
cond_resched_rcu_qs();
20152017
WRITE_ONCE(rsp->gp_activity, jiffies);
20162018
rcu_gp_slow(rsp, gp_cleanup_delay);

kernel/rcu/tree.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -609,7 +609,8 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp);
609609
static void increment_cpu_stall_ticks(void);
610610
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
611611
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
612-
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
612+
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
613+
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
613614
static void rcu_init_one_nocb(struct rcu_node *rnp);
614615
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
615616
bool lazy, unsigned long flags);

kernel/rcu/tree_plugin.h

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1777,9 +1777,9 @@ early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
17771777
* Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
17781778
* grace period.
17791779
*/
1780-
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1780+
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
17811781
{
1782-
swake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
1782+
swake_up_all(sq);
17831783
}
17841784

17851785
/*
@@ -1795,6 +1795,11 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
17951795
rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
17961796
}
17971797

1798+
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1799+
{
1800+
return &rnp->nocb_gp_wq[rnp->completed & 0x1];
1801+
}
1802+
17981803
static void rcu_init_one_nocb(struct rcu_node *rnp)
17991804
{
18001805
init_swait_queue_head(&rnp->nocb_gp_wq[0]);
@@ -2469,14 +2474,19 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
24692474
return false;
24702475
}
24712476

2472-
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2477+
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
24732478
{
24742479
}
24752480

24762481
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
24772482
{
24782483
}
24792484

2485+
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
2486+
{
2487+
return NULL;
2488+
}
2489+
24802490
static void rcu_init_one_nocb(struct rcu_node *rnp)
24812491
{
24822492
}

0 commit comments

Comments (0)