
Commit 38d715f

josefbacik authored and kdave committed
btrfs: use btrfs_start_delalloc_roots in shrink_delalloc
The original iteration of flushing had us flushing delalloc and then checking to see if we could make our reservation, thus we were very careful about how many pages we would flush at once.

But now that everything is async and we satisfy tickets as the space becomes available, we don't have to keep track of any of this; simply try to flush the number of dirty inodes we may have in order to reclaim space to make our reservation. This cleans up our delalloc flushing significantly.

The async_pages stuff is dropped because btrfs_start_delalloc_roots() handles the case that we generate async extents for us, so we no longer require this extra logic.

Reviewed-by: Nikolay Borisov <[email protected]>
Tested-by: Nikolay Borisov <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Signed-off-by: Josef Bacik <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
1 parent 39753e4 commit 38d715f

1 file changed: +1, -54 lines

fs/btrfs/space-info.c

Lines changed: 1 addition & 54 deletions
@@ -476,28 +476,6 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
 	up_read(&info->groups_sem);
 }
 
-static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
-					 unsigned long nr_pages, u64 nr_items)
-{
-	struct super_block *sb = fs_info->sb;
-
-	if (down_read_trylock(&sb->s_umount)) {
-		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
-		up_read(&sb->s_umount);
-	} else {
-		/*
-		 * We needn't worry the filesystem going from r/w to r/o though
-		 * we don't acquire ->s_umount mutex, because the filesystem
-		 * should guarantee the delalloc inodes list be empty after
-		 * the filesystem is readonly(all dirty pages are written to
-		 * the disk).
-		 */
-		btrfs_start_delalloc_roots(fs_info, nr_items);
-		if (!current->journal_info)
-			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
-	}
-}
-
 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
 					u64 to_reclaim)
 {
@@ -523,10 +501,8 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 	struct btrfs_trans_handle *trans;
 	u64 delalloc_bytes;
 	u64 dio_bytes;
-	u64 async_pages;
 	u64 items;
 	long time_left;
-	unsigned long nr_pages;
 	int loops;
 
 	/* Calc the number of the pages we need flush for space reservation */
@@ -567,37 +543,8 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
 	loops = 0;
 	while ((delalloc_bytes || dio_bytes) && loops < 3) {
-		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
-
-		/*
-		 * Triggers inode writeback for up to nr_pages. This will invoke
-		 * ->writepages callback and trigger delalloc filling
-		 * (btrfs_run_delalloc_range()).
-		 */
-		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
-
-		/*
-		 * We need to wait for the compressed pages to start before
-		 * we continue.
-		 */
-		async_pages = atomic_read(&fs_info->async_delalloc_pages);
-		if (!async_pages)
-			goto skip_async;
-
-		/*
-		 * Calculate how many compressed pages we want to be written
-		 * before we continue. I.e if there are more async pages than we
-		 * require wait_event will wait until nr_pages are written.
-		 */
-		if (async_pages <= nr_pages)
-			async_pages = 0;
-		else
-			async_pages -= nr_pages;
+		btrfs_start_delalloc_roots(fs_info, items);
 
-		wait_event(fs_info->async_submit_wait,
-			   atomic_read(&fs_info->async_delalloc_pages) <=
-			   (int)async_pages);
-skip_async:
 		spin_lock(&space_info->lock);
 		if (list_empty(&space_info->tickets) &&
 		    list_empty(&space_info->priority_tickets)) {
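
For reference, here is a condensed sketch of how the flushing loop in shrink_delalloc() reads after this patch, assembled from the added and context lines of the last hunk above; the body of the empty-ticket-list branch and the rest of the loop lie outside this hunk and are only indicated by comments.

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		/* Start delalloc writeback for up to 'items' inodes across all roots. */
		btrfs_start_delalloc_roots(fs_info, items);

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			/* No tickets are waiting for space; branch body outside this hunk. */
		}
		/* Remainder of the loop is unchanged and outside this hunk. */
	}

Compared to the removed path, there is no per-iteration page accounting and no wait on fs_info->async_delalloc_pages; per the commit message, btrfs_start_delalloc_roots() already covers the case where async extents are generated, and pending tickets are satisfied as space becomes available.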
