Skip to content

Commit 5a15912

Browse files
jasowang authored and davem330 committed
virtio-net: fix the race between refill work and close
We try using cancel_delayed_work_sync() to prevent the work from enabling NAPI. This is insufficient since we don't disable the source of the refill work scheduling. This means a NAPI poll callback after cancel_delayed_work_sync() can schedule the refill work, which can then re-enable the NAPI, leading to a use-after-free [1].

Since the work can enable NAPI, we can't simply disable NAPI before calling cancel_delayed_work_sync(). So fix this by introducing a dedicated boolean to control whether or not the work could be scheduled from NAPI.

[1]
==================================================================
BUG: KASAN: use-after-free in refill_work+0x43/0xd4
Read of size 2 at addr ffff88810562c92e by task kworker/2:1/42

CPU: 2 PID: 42 Comm: kworker/2:1 Not tainted 5.19.0-rc1+ #480
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
Workqueue: events refill_work
Call Trace:
 <TASK>
 dump_stack_lvl+0x34/0x44
 print_report.cold+0xbb/0x6ac
 ? _printk+0xad/0xde
 ? refill_work+0x43/0xd4
 kasan_report+0xa8/0x130
 ? refill_work+0x43/0xd4
 refill_work+0x43/0xd4
 process_one_work+0x43d/0x780
 worker_thread+0x2a0/0x6f0
 ? process_one_work+0x780/0x780
 kthread+0x167/0x1a0
 ? kthread_exit+0x50/0x50
 ret_from_fork+0x22/0x30
 </TASK>
...

Fixes: b2baed6 ("virtio_net: set/cancel work on ndo_open/ndo_stop")
Signed-off-by: Jason Wang <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Xuan Zhuo <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent b5177ed commit 5a15912

File tree

1 file changed: +34 −3 lines changed

drivers/net/virtio_net.c

Lines changed: 34 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -242,9 +242,15 @@ struct virtnet_info {
242242
/* Packet virtio header size */
243243
u8 hdr_len;
244244

245-
/* Work struct for refilling if we run low on memory. */
245+
/* Work struct for delayed refilling if we run low on memory. */
246246
struct delayed_work refill;
247247

248+
/* Is delayed refill enabled? */
249+
bool refill_enabled;
250+
251+
/* The lock to synchronize the access to refill_enabled */
252+
spinlock_t refill_lock;
253+
248254
/* Work struct for config space updates */
249255
struct work_struct config_work;
250256

@@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
348354
return p;
349355
}
350356

357+
static void enable_delayed_refill(struct virtnet_info *vi)
358+
{
359+
spin_lock_bh(&vi->refill_lock);
360+
vi->refill_enabled = true;
361+
spin_unlock_bh(&vi->refill_lock);
362+
}
363+
364+
static void disable_delayed_refill(struct virtnet_info *vi)
365+
{
366+
spin_lock_bh(&vi->refill_lock);
367+
vi->refill_enabled = false;
368+
spin_unlock_bh(&vi->refill_lock);
369+
}
370+
351371
static void virtqueue_napi_schedule(struct napi_struct *napi,
352372
struct virtqueue *vq)
353373
{
@@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
15271547
}
15281548

15291549
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
1530-
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
1531-
schedule_delayed_work(&vi->refill, 0);
1550+
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
1551+
spin_lock(&vi->refill_lock);
1552+
if (vi->refill_enabled)
1553+
schedule_delayed_work(&vi->refill, 0);
1554+
spin_unlock(&vi->refill_lock);
1555+
}
15321556
}
15331557

15341558
u64_stats_update_begin(&rq->stats.syncp);
@@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
16511675
struct virtnet_info *vi = netdev_priv(dev);
16521676
int i, err;
16531677

1678+
enable_delayed_refill(vi);
1679+
16541680
for (i = 0; i < vi->max_queue_pairs; i++) {
16551681
if (i < vi->curr_queue_pairs)
16561682
/* Make sure we have some buffers: if oom use wq. */
@@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
20332059
struct virtnet_info *vi = netdev_priv(dev);
20342060
int i;
20352061

2062+
/* Make sure NAPI doesn't schedule refill work */
2063+
disable_delayed_refill(vi);
20362064
/* Make sure refill_work doesn't re-enable napi! */
20372065
cancel_delayed_work_sync(&vi->refill);
20382066

@@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
27922820

27932821
virtio_device_ready(vdev);
27942822

2823+
enable_delayed_refill(vi);
2824+
27952825
if (netif_running(vi->dev)) {
27962826
err = virtnet_open(vi->dev);
27972827
if (err)
@@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
35353565
vdev->priv = vi;
35363566

35373567
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
3568+
spin_lock_init(&vi->refill_lock);
35383569

35393570
/* If we can receive ANY GSO packets, we must allocate large ones. */
35403571
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||

0 commit comments

Comments
 (0)