@@ -476,8 +476,11 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
-/* Scan through pages checking if pages follow certain conditions. */
-static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+/*
+ * Scan through pages checking if pages follow certain conditions,
+ * and move them to the pagelist if they do.
+ */
+static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -515,8 +518,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	return addr != end;
 }
 
-static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
-		const nodemask_t *nodes, unsigned long flags,
+static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
+		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
 		void *private)
 {
 #ifdef CONFIG_HUGETLB_PAGE
@@ -539,7 +542,7 @@ static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
 #endif
 }
 
-static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -553,21 +556,21 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		if (!pmd_present(*pmd))
 			continue;
 		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
-			check_hugetlb_pmd_range(vma, pmd, nodes,
+			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
 						flags, private);
 			continue;
 		}
 		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
-		if (check_pte_range(vma, pmd, addr, next, nodes,
+		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -582,14 +585,14 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		if (check_pmd_range(vma, pud, addr, next, nodes,
+		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int check_pgd_range(struct vm_area_struct *vma,
+static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -602,7 +605,7 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		if (check_pud_range(vma, pgd, addr, next, nodes,
+		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pgd++, addr = next, addr != end);
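
The four walkers renamed above all follow the same shape: iterate the entries at one page-table level, skip empty or bad entries (pgd_none_or_clear_bad() and friends), descend one level, and queue matching pages once the walk reaches the leaves. Below is a minimal, self-contained user-space C sketch of that walk-and-queue pattern; every name in it (toy_dir, toy_queue_range, and so on) is a hypothetical illustration, not a kernel API.

#include <stdio.h>

/* Two-level toy "page table": a directory of tables of pages. */
#define DIR_ENTRIES 4
#define TBL_ENTRIES 4

struct toy_page  { int node; struct toy_page *next; };
struct toy_table { struct toy_page *pages[TBL_ENTRIES]; };
struct toy_dir   { struct toy_table *tables[DIR_ENTRIES]; };

/* Leaf level: queue pages sitting on a matching node, the way
 * queue_pages_pte_range() moves matching pages onto the pagelist. */
static void toy_queue_leaf(struct toy_table *tbl, int node,
			   struct toy_page **pagelist)
{
	for (int i = 0; i < TBL_ENTRIES; i++) {
		struct toy_page *p = tbl->pages[i];
		if (!p || p->node != node)
			continue;	/* like the not-present skips */
		p->next = *pagelist;	/* "isolate" and queue */
		*pagelist = p;
	}
}

/* Upper level: skip empty slots and descend, the way the kernel
 * walkers use pgd_none_or_clear_bad()/pud_none_or_clear_bad(). */
static void toy_queue_range(struct toy_dir *dir, int node,
			    struct toy_page **pagelist)
{
	for (int i = 0; i < DIR_ENTRIES; i++) {
		if (!dir->tables[i])
			continue;
		toy_queue_leaf(dir->tables[i], node, pagelist);
	}
}

int main(void)
{
	struct toy_table tbl = { 0 };
	struct toy_dir dir = { 0 };
	struct toy_page a = { 0, NULL }, b = { 1, NULL };
	struct toy_page *pagelist = NULL;

	tbl.pages[0] = &a;
	tbl.pages[1] = &b;
	dir.tables[2] = &tbl;

	toy_queue_range(&dir, 1, &pagelist);	/* collect node-1 pages */
	for (struct toy_page *p = pagelist; p; p = p->next)
		printf("queued a page from node %d\n", p->node);
	return 0;
}

The kernel versions additionally propagate failure (-EIO) up through the levels to queue_pages_range(); the toy keeps only the walk-and-queue control flow.
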
@@ -640,12 +643,14 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
 
 /*
- * Check if all pages in a range are on a set of nodes.
- * If pagelist != NULL then isolate pages from the LRU and
- * put them on the pagelist.
+ * Walk through page tables and collect pages to be migrated.
+ *
+ * If pages found in a given range are on a set of nodes (determined by
+ * @nodes and @flags), they are isolated and queued to the pagelist,
+ * which is passed via @private.
 */
 static struct vm_area_struct *
-check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
 	int err;
@@ -680,7 +685,7 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 		     vma_migratable(vma))) {
 
-			err = check_pgd_range(vma, start, endvma, nodes,
+			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 					flags, private);
 			if (err) {
 				first = ERR_PTR(err);
@@ -1050,7 +1055,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
 	 */
 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
-	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
@@ -1288,7 +1293,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	vma = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
 	err = PTR_ERR(vma);	/* maybe ... */
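
For context on the two call sites above: do_mbind() implements the mbind(2) system call, so the queue_pages_range() path is exercised whenever user space applies a memory policy with a page-movement flag. A hedged sketch of that trigger from user space, assuming libnuma's <numaif.h> wrapper for mbind(2) is available (link with -lnuma); the target node and buffer size are arbitrary choices for illustration:

#include <numaif.h>	/* mbind(), MPOL_BIND, MPOL_MF_MOVE */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4UL << 20;	/* 4 MiB, arbitrary */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Bind the range to node 0 (arbitrary).  MPOL_MF_MOVE asks the
	 * kernel to migrate pages already resident on other nodes, which
	 * is what drives queue_pages_range() inside do_mbind(). */
	unsigned long nodemask = 1UL;	/* bit 0 = node 0 */
	if (mbind(buf, len, MPOL_BIND, &nodemask,
		  8 * sizeof(nodemask), MPOL_MF_MOVE) != 0)
		perror("mbind");

	munmap(buf, len);
	return 0;
}

With MPOL_MF_MOVE set, the pages of buf that are resident on nodes outside the mask are exactly the ones queue_pages_range() isolates onto the pagelist for migration.
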