10
10
#include <linux/blkdev.h>
11
11
#include <linux/swap.h>
12
12
#include <linux/version.h>
13
+ #include <linux/writeback.h>
13
14
#include "extent_map.h"
14
15
15
16
/* temporary define until extent_map moves out of btrfs */
@@ -35,6 +36,12 @@ struct tree_entry {
35
36
struct rb_node rb_node ;
36
37
};
37
38
39
/*
 * State carried through the per-page writepage callbacks so that
 * physically contiguous pages can be merged into a single bio before
 * submission.  The caller that owns this struct is responsible for
 * flushing any bio still held in ->bio when the last page is done.
 */
struct extent_page_data {
	struct bio *bio;		/* in-flight bio being filled, NULL if none */
	struct extent_map_tree *tree;	/* extent tree the pages belong to */
	get_extent_t *get_extent;	/* maps file offsets to extents */
};
44
+
38
45
void __init extent_map_init (void )
39
46
{
40
47
extent_map_cache = btrfs_cache_create ("extent_map" ,
@@ -1460,40 +1467,76 @@ static int end_bio_extent_preparewrite(struct bio *bio,
1460
1467
#endif
1461
1468
}
1462
1469
1463
- static int submit_extent_page (int rw , struct extent_map_tree * tree ,
1464
- struct page * page , sector_t sector ,
1465
- size_t size , unsigned long offset ,
1466
- struct block_device * bdev ,
1467
- bio_end_io_t end_io_func )
1470
+ static struct bio *
1471
+ extent_bio_alloc (struct block_device * bdev , u64 first_sector , int nr_vecs ,
1472
+ gfp_t gfp_flags )
1468
1473
{
1469
1474
struct bio * bio ;
1470
- int ret = 0 ;
1471
1475
1472
- bio = bio_alloc (GFP_NOIO , 1 );
1476
+ bio = bio_alloc (gfp_flags , nr_vecs );
1473
1477
1474
- bio -> bi_sector = sector ;
1475
- bio -> bi_bdev = bdev ;
1476
- bio -> bi_io_vec [0 ].bv_page = page ;
1477
- bio -> bi_io_vec [0 ].bv_len = size ;
1478
- bio -> bi_io_vec [0 ].bv_offset = offset ;
1479
-
1480
- bio -> bi_vcnt = 1 ;
1481
- bio -> bi_idx = 0 ;
1482
- bio -> bi_size = size ;
1478
+ if (bio == NULL && (current -> flags & PF_MEMALLOC )) {
1479
+ while (!bio && (nr_vecs /= 2 ))
1480
+ bio = bio_alloc (gfp_flags , nr_vecs );
1481
+ }
1483
1482
1484
- bio -> bi_end_io = end_io_func ;
1485
- bio -> bi_private = tree ;
1483
+ if (bio ) {
1484
+ bio -> bi_bdev = bdev ;
1485
+ bio -> bi_sector = first_sector ;
1486
+ }
1487
+ return bio ;
1488
+ }
1486
1489
1490
+ static int submit_one_bio (int rw , struct bio * bio )
1491
+ {
1492
+ int ret = 0 ;
1487
1493
bio_get (bio );
1488
1494
submit_bio (rw , bio );
1489
-
1490
1495
if (bio_flagged (bio , BIO_EOPNOTSUPP ))
1491
1496
ret = - EOPNOTSUPP ;
1492
-
1493
1497
bio_put (bio );
1494
1498
return ret ;
1495
1499
}
1496
1500
1501
+ static int submit_extent_page (int rw , struct extent_map_tree * tree ,
1502
+ struct page * page , sector_t sector ,
1503
+ size_t size , unsigned long offset ,
1504
+ struct block_device * bdev ,
1505
+ struct bio * * bio_ret ,
1506
+ int max_pages ,
1507
+ bio_end_io_t end_io_func )
1508
+ {
1509
+ int ret = 0 ;
1510
+ struct bio * bio ;
1511
+ int nr ;
1512
+
1513
+ if (bio_ret && * bio_ret ) {
1514
+ bio = * bio_ret ;
1515
+ if (bio -> bi_sector + (bio -> bi_size >> 9 ) != sector ||
1516
+ bio_add_page (bio , page , size , offset ) < size ) {
1517
+ ret = submit_one_bio (rw , bio );
1518
+ bio = NULL ;
1519
+ } else {
1520
+ return 0 ;
1521
+ }
1522
+ }
1523
+ nr = min (max_pages , bio_get_nr_vecs (bdev ));
1524
+ bio = extent_bio_alloc (bdev , sector , nr , GFP_NOFS | __GFP_HIGH );
1525
+ if (!bio ) {
1526
+ printk ("failed to allocate bio nr %d\n" , nr );
1527
+ }
1528
+ bio_add_page (bio , page , size , offset );
1529
+ bio -> bi_end_io = end_io_func ;
1530
+ bio -> bi_private = tree ;
1531
+ if (bio_ret ) {
1532
+ * bio_ret = bio ;
1533
+ } else {
1534
+ ret = submit_one_bio (rw , bio );
1535
+ }
1536
+
1537
+ return ret ;
1538
+ }
1539
+
1497
1540
void set_page_extent_mapped (struct page * page )
1498
1541
{
1499
1542
if (!PagePrivate (page )) {
@@ -1590,7 +1633,8 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1590
1633
if (!ret ) {
1591
1634
ret = submit_extent_page (READ , tree , page ,
1592
1635
sector , iosize , page_offset ,
1593
- bdev , end_bio_extent_readpage );
1636
+ bdev , NULL , 1 ,
1637
+ end_bio_extent_readpage );
1594
1638
}
1595
1639
if (ret )
1596
1640
SetPageError (page );
@@ -1613,11 +1657,12 @@ EXPORT_SYMBOL(extent_read_full_page);
1613
1657
* are found, they are marked writeback. Then the lock bits are removed
1614
1658
* and the end_io handler clears the writeback ranges
1615
1659
*/
1616
- int extent_write_full_page (struct extent_map_tree * tree , struct page * page ,
1617
- get_extent_t * get_extent ,
1618
- struct writeback_control * wbc )
1660
+ static int __extent_writepage (struct page * page , struct writeback_control * wbc ,
1661
+ void * data )
1619
1662
{
1620
1663
struct inode * inode = page -> mapping -> host ;
1664
+ struct extent_page_data * epd = data ;
1665
+ struct extent_map_tree * tree = epd -> tree ;
1621
1666
u64 start = (u64 )page -> index << PAGE_CACHE_SHIFT ;
1622
1667
u64 page_end = start + PAGE_CACHE_SIZE - 1 ;
1623
1668
u64 end ;
@@ -1691,7 +1736,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1691
1736
clear_extent_dirty (tree , cur , page_end , GFP_NOFS );
1692
1737
break ;
1693
1738
}
1694
- em = get_extent (inode , page , page_offset , cur , end , 1 );
1739
+ em = epd -> get_extent (inode , page , page_offset , cur , end , 1 );
1695
1740
if (IS_ERR (em ) || !em ) {
1696
1741
SetPageError (page );
1697
1742
break ;
@@ -1734,9 +1779,12 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1734
1779
if (ret )
1735
1780
SetPageError (page );
1736
1781
else {
1782
+ unsigned long nr = end_index + 1 ;
1737
1783
set_range_writeback (tree , cur , cur + iosize - 1 );
1784
+
1738
1785
ret = submit_extent_page (WRITE , tree , page , sector ,
1739
1786
iosize , page_offset , bdev ,
1787
+ & epd -> bio , nr ,
1740
1788
end_bio_extent_writepage );
1741
1789
if (ret )
1742
1790
SetPageError (page );
@@ -1750,8 +1798,44 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1750
1798
unlock_page (page );
1751
1799
return 0 ;
1752
1800
}
1801
+
1802
+ int extent_write_full_page (struct extent_map_tree * tree , struct page * page ,
1803
+ get_extent_t * get_extent ,
1804
+ struct writeback_control * wbc )
1805
+ {
1806
+ int ret ;
1807
+ struct extent_page_data epd = {
1808
+ .bio = NULL ,
1809
+ .tree = tree ,
1810
+ .get_extent = get_extent ,
1811
+ };
1812
+
1813
+ ret = __extent_writepage (page , wbc , & epd );
1814
+ if (epd .bio )
1815
+ submit_one_bio (WRITE , epd .bio );
1816
+ return ret ;
1817
+ }
1753
1818
EXPORT_SYMBOL (extent_write_full_page );
1754
1819
1820
+ int extent_writepages (struct extent_map_tree * tree ,
1821
+ struct address_space * mapping ,
1822
+ get_extent_t * get_extent ,
1823
+ struct writeback_control * wbc )
1824
+ {
1825
+ int ret ;
1826
+ struct extent_page_data epd = {
1827
+ .bio = NULL ,
1828
+ .tree = tree ,
1829
+ .get_extent = get_extent ,
1830
+ };
1831
+
1832
+ ret = write_cache_pages (mapping , wbc , __extent_writepage , & epd );
1833
+ if (epd .bio )
1834
+ submit_one_bio (WRITE , epd .bio );
1835
+ return ret ;
1836
+ }
1837
+ EXPORT_SYMBOL (extent_writepages );
1838
+
1755
1839
/*
1756
1840
* basic invalidatepage code, this waits on any locked or writeback
1757
1841
* ranges corresponding to the page, and then deletes any extent state
@@ -1869,6 +1953,7 @@ int extent_prepare_write(struct extent_map_tree *tree,
1869
1953
EXTENT_LOCKED , 0 , NULL , GFP_NOFS );
1870
1954
ret = submit_extent_page (READ , tree , page ,
1871
1955
sector , iosize , page_offset , em -> bdev ,
1956
+ NULL , 1 ,
1872
1957
end_bio_extent_preparewrite );
1873
1958
iocount ++ ;
1874
1959
block_start = block_start + iosize ;
0 commit comments