@@ -172,8 +172,8 @@ xfs_buf_stale(
172172struct xfs_buf *
173173xfs_buf_alloc (
174174 struct xfs_buftarg * target ,
175- xfs_off_t range_base ,
176- size_t range_length ,
175+ xfs_daddr_t blkno ,
176+ size_t numblks ,
177177 xfs_buf_flags_t flags )
178178{
179179 struct xfs_buf * bp ;
@@ -196,14 +196,21 @@ xfs_buf_alloc(
196196 sema_init (& bp -> b_sema , 0 ); /* held, no waiters */
197197 XB_SET_OWNER (bp );
198198 bp -> b_target = target ;
199- bp -> b_file_offset = range_base ;
199+ bp -> b_file_offset = blkno << BBSHIFT ;
200200 /*
201201 * Set buffer_length and count_desired to the same value initially.
202202 * I/O routines should use count_desired, which will be the same in
203203 * most cases but may be reset (e.g. XFS recovery).
204204 */
205- bp -> b_buffer_length = bp -> b_count_desired = range_length ;
205+ bp -> b_buffer_length = bp -> b_count_desired = numblks << BBSHIFT ;
206206 bp -> b_flags = flags ;
207+
208+ /*
209+ * We do not set the block number here in the buffer because we have not
210+ * finished initialising the buffer. We insert the buffer into the cache
211+ * in this state, so this ensures that we are unable to do IO on a
212+ * buffer that hasn't been fully initialised.
213+ */
207214 bp -> b_bn = XFS_BUF_DADDR_NULL ;
208215 atomic_set (& bp -> b_pin_count , 0 );
209216 init_waitqueue_head (& bp -> b_waiters );
@@ -426,29 +433,29 @@ _xfs_buf_map_pages(
426433 */
427434xfs_buf_t *
428435_xfs_buf_find (
429- xfs_buftarg_t * btp , /* block device target */
430- xfs_off_t ioff , /* starting offset of range */
431- size_t isize , /* length of range */
436+ struct xfs_buftarg * btp ,
437+ xfs_daddr_t blkno ,
438+ size_t numblks ,
432439 xfs_buf_flags_t flags ,
433440 xfs_buf_t * new_bp )
434441{
435- xfs_off_t range_base ;
436- size_t range_length ;
442+ xfs_off_t offset ;
443+ size_t numbytes ;
437444 struct xfs_perag * pag ;
438445 struct rb_node * * rbp ;
439446 struct rb_node * parent ;
440447 xfs_buf_t * bp ;
441448
442- range_base = ( ioff << BBSHIFT );
443- range_length = ( isize << BBSHIFT );
449+ offset = BBTOB ( blkno );
450+ numbytes = BBTOB ( numblks );
444451
445452 /* Check for IOs smaller than the sector size / not sector aligned */
446- ASSERT (!(range_length < (1 << btp -> bt_sshift )));
447- ASSERT (!(range_base & (xfs_off_t )btp -> bt_smask ));
453+ ASSERT (!(numbytes < (1 << btp -> bt_sshift )));
454+ ASSERT (!(offset & (xfs_off_t )btp -> bt_smask ));
448455
449456 /* get tree root */
450457 pag = xfs_perag_get (btp -> bt_mount ,
451- xfs_daddr_to_agno (btp -> bt_mount , ioff ));
458+ xfs_daddr_to_agno (btp -> bt_mount , blkno ));
452459
453460 /* walk tree */
454461 spin_lock (& pag -> pag_buf_lock );
@@ -459,9 +466,9 @@ _xfs_buf_find(
459466 parent = * rbp ;
460467 bp = rb_entry (parent , struct xfs_buf , b_rbnode );
461468
462- if (range_base < bp -> b_file_offset )
469+ if (offset < bp -> b_file_offset )
463470 rbp = & (* rbp )-> rb_left ;
464- else if (range_base > bp -> b_file_offset )
471+ else if (offset > bp -> b_file_offset )
465472 rbp = & (* rbp )-> rb_right ;
466473 else {
467474 /*
@@ -472,7 +479,7 @@ _xfs_buf_find(
472479 * reallocating a busy extent. Skip this buffer and
473480 * continue searching to the right for an exact match.
474481 */
475- if (bp -> b_buffer_length != range_length ) {
482+ if (bp -> b_buffer_length != numbytes ) {
476483 ASSERT (bp -> b_flags & XBF_STALE );
477484 rbp = & (* rbp )-> rb_right ;
478485 continue ;
@@ -532,21 +539,20 @@ _xfs_buf_find(
532539 */
533540struct xfs_buf *
534541xfs_buf_get (
535- xfs_buftarg_t * target ,/* target for buffer */
536- xfs_off_t ioff , /* starting offset of range */
537- size_t isize , /* length of range */
542+ xfs_buftarg_t * target ,
543+ xfs_daddr_t blkno ,
544+ size_t numblks ,
538545 xfs_buf_flags_t flags )
539546{
540547 struct xfs_buf * bp ;
541548 struct xfs_buf * new_bp ;
542549 int error = 0 ;
543550
544- bp = _xfs_buf_find (target , ioff , isize , flags , NULL );
551+ bp = _xfs_buf_find (target , blkno , numblks , flags , NULL );
545552 if (likely (bp ))
546553 goto found ;
547554
548- new_bp = xfs_buf_alloc (target , ioff << BBSHIFT , isize << BBSHIFT ,
549- flags );
555+ new_bp = xfs_buf_alloc (target , blkno , numblks , flags );
550556 if (unlikely (!new_bp ))
551557 return NULL ;
552558
@@ -556,7 +562,7 @@ xfs_buf_get(
556562 return NULL ;
557563 }
558564
559- bp = _xfs_buf_find (target , ioff , isize , flags , new_bp );
565+ bp = _xfs_buf_find (target , blkno , numblks , flags , new_bp );
560566 if (!bp ) {
561567 xfs_buf_free (new_bp );
562568 return NULL ;
@@ -569,7 +575,7 @@ xfs_buf_get(
569575 * Now we have a workable buffer, fill in the block number so
570576 * that we can do IO on it.
571577 */
572- bp -> b_bn = ioff ;
578+ bp -> b_bn = blkno ;
573579 bp -> b_count_desired = bp -> b_buffer_length ;
574580
575581found :
@@ -613,15 +619,15 @@ _xfs_buf_read(
613619xfs_buf_t *
614620xfs_buf_read (
615621 xfs_buftarg_t * target ,
616- xfs_off_t ioff ,
617- size_t isize ,
622+ xfs_daddr_t blkno ,
623+ size_t numblks ,
618624 xfs_buf_flags_t flags )
619625{
620626 xfs_buf_t * bp ;
621627
622628 flags |= XBF_READ ;
623629
624- bp = xfs_buf_get (target , ioff , isize , flags );
630+ bp = xfs_buf_get (target , blkno , numblks , flags );
625631 if (bp ) {
626632 trace_xfs_buf_read (bp , flags , _RET_IP_ );
627633
@@ -656,13 +662,13 @@ xfs_buf_read(
656662void
657663xfs_buf_readahead (
658664 xfs_buftarg_t * target ,
659- xfs_off_t ioff ,
660- size_t isize )
665+ xfs_daddr_t blkno ,
666+ size_t numblks )
661667{
662668 if (bdi_read_congested (target -> bt_bdi ))
663669 return ;
664670
665- xfs_buf_read (target , ioff , isize ,
671+ xfs_buf_read (target , blkno , numblks ,
666672 XBF_TRYLOCK |XBF_ASYNC |XBF_READ_AHEAD |XBF_DONT_BLOCK );
667673}
668674
@@ -672,24 +678,23 @@ xfs_buf_readahead(
672678 */
673679struct xfs_buf *
674680xfs_buf_read_uncached (
675- struct xfs_mount * mp ,
676681 struct xfs_buftarg * target ,
677682 xfs_daddr_t daddr ,
678- size_t length ,
683+ size_t numblks ,
679684 int flags )
680685{
681686 xfs_buf_t * bp ;
682687 int error ;
683688
684- bp = xfs_buf_get_uncached (target , length , flags );
689+ bp = xfs_buf_get_uncached (target , numblks , flags );
685690 if (!bp )
686691 return NULL ;
687692
688693 /* set up the buffer for a read IO */
689694 XFS_BUF_SET_ADDR (bp , daddr );
690695 XFS_BUF_READ (bp );
691696
692- xfsbdstrat (mp , bp );
697+ xfsbdstrat (target -> bt_mount , bp );
693698 error = xfs_buf_iowait (bp );
694699 if (error ) {
695700 xfs_buf_relse (bp );
@@ -705,7 +710,7 @@ xfs_buf_read_uncached(
705710void
706711xfs_buf_set_empty (
707712 struct xfs_buf * bp ,
708- size_t len )
713+ size_t numblks )
709714{
710715 if (bp -> b_pages )
711716 _xfs_buf_free_pages (bp );
@@ -714,7 +719,7 @@ xfs_buf_set_empty(
714719 bp -> b_page_count = 0 ;
715720 bp -> b_addr = NULL ;
716721 bp -> b_file_offset = 0 ;
717- bp -> b_buffer_length = bp -> b_count_desired = len ;
722+ bp -> b_buffer_length = bp -> b_count_desired = numblks << BBSHIFT ;
718723 bp -> b_bn = XFS_BUF_DADDR_NULL ;
719724 bp -> b_flags &= ~XBF_MAPPED ;
720725}
@@ -776,17 +781,18 @@ xfs_buf_associate_memory(
776781xfs_buf_t *
777782xfs_buf_get_uncached (
778783 struct xfs_buftarg * target ,
779- size_t len ,
784+ size_t numblks ,
780785 int flags )
781786{
782- unsigned long page_count = PAGE_ALIGN ( len ) >> PAGE_SHIFT ;
787+ unsigned long page_count ;
783788 int error , i ;
784789 xfs_buf_t * bp ;
785790
786- bp = xfs_buf_alloc (target , 0 , len , 0 );
791+ bp = xfs_buf_alloc (target , 0 , numblks , 0 );
787792 if (unlikely (bp == NULL ))
788793 goto fail ;
789794
795+ page_count = PAGE_ALIGN (numblks << BBSHIFT ) >> PAGE_SHIFT ;
790796 error = _xfs_buf_get_pages (bp , page_count , 0 );
791797 if (error )
792798 goto fail_free_buf ;