Commit 8510e69

JoonsooKim authored and torvalds committed
mm/page_alloc: fix memalloc_nocma_{save/restore} APIs
Currently, the memalloc_nocma_{save/restore} APIs, which prevent allocation from CMA areas, are implemented using current_gfp_context(). However, this implementation has two problems.

First, it doesn't work for the allocation fastpath. The fastpath uses the original gfp_mask, because current_gfp_context() was introduced to control reclaim, which runs on the slowpath. So a CMA area can still be allocated through the fastpath even when the memalloc_nocma_{save/restore} APIs are used. Currently, there is just one user of these APIs, and it has a fallback method to prevent an actual problem.

Second, clearing __GFP_MOVABLE in current_gfp_context() has the side effect of also excluding ZONE_MOVABLE memory from the allocation target.

To fix these problems, this patch changes the implementation to exclude CMA areas in page allocation via alloc_flags. alloc_flags is the main mechanism for controlling allocation, so it is the right fit for excluding CMA areas.

Fixes: d7fefcc ("mm/cma: add PF flag to force non cma alloc")
Signed-off-by: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Aneesh Kumar K.V" <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
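For reference, a minimal sketch of how a caller uses this API pair (the caller and the GFP_HIGHUSER_MOVABLE mask are illustrative assumptions, not taken from this patch): memalloc_nocma_save() sets PF_MEMALLOC_NOCMA on the current task, and with this fix the allocator honors that flag on both the fast and slow paths by never setting ALLOC_CMA.

	unsigned int nocma_flags;
	struct page *page;

	/* Enter a scoped "no CMA" section for the current task. */
	nocma_flags = memalloc_nocma_save();

	/*
	 * Movable allocation, but ALLOC_CMA is not set for this task,
	 * so the page must not come from a CMA pageblock.
	 */
	page = alloc_page(GFP_HIGHUSER_MOVABLE);

	/* Leave the section, restoring the previous flag state. */
	memalloc_nocma_restore(nocma_flags);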
1 parent 182f3d7 commit 8510e69

2 files changed: +22 / -17 lines changed


include/linux/sched/mm.h

Lines changed: 1 addition & 7 deletions
@@ -175,12 +175,10 @@ static inline bool in_vfork(struct task_struct *tsk)
  * Applies per-task gfp context to the given allocation flags.
  * PF_MEMALLOC_NOIO implies GFP_NOIO
  * PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	if (unlikely(current->flags &
-			(PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+	if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
 		/*
 		 * NOIO implies both NOIO and NOFS and it is a weaker context
 		 * so always make sure it makes precedence
@@ -189,10 +187,6 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 		flags &= ~(__GFP_IO | __GFP_FS);
 	else if (current->flags & PF_MEMALLOC_NOFS)
 		flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
-	if (current->flags & PF_MEMALLOC_NOCMA)
-		flags &= ~__GFP_MOVABLE;
-#endif
 	}
 	return flags;
 }
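As an aside on the second problem described in the commit message, a rough sketch of the side effect of the masking removed above (the !CONFIG_HIGHMEM outcome is an assumption of this example; gfp_zone() is the existing zone-selection helper): clearing __GFP_MOVABLE changes which zones are eligible, not just the migratetype, so the old code also kept the allocation out of ZONE_MOVABLE entirely.

	/* gfp_zone() keys zone selection off __GFP_MOVABLE. */
	enum zone_type zt;

	zt = gfp_zone(GFP_HIGHUSER_MOVABLE);	/* -> ZONE_MOVABLE */

	/*
	 * With __GFP_MOVABLE cleared, the highest eligible zone drops to
	 * ZONE_NORMAL (on !CONFIG_HIGHMEM), so ZONE_MOVABLE memory is no
	 * longer usable for this allocation at all.
	 */
	zt = gfp_zone(GFP_HIGHUSER_MOVABLE & ~__GFP_MOVABLE);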

mm/page_alloc.c

Lines changed: 21 additions & 10 deletions
@@ -2785,7 +2785,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 	 * allocating from CMA when over half of the zone's free memory
 	 * is in the CMA area.
 	 */
-	if (migratetype == MIGRATE_MOVABLE &&
+	if (alloc_flags & ALLOC_CMA &&
 	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
 	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
 		page = __rmqueue_cma_fallback(zone, order);
@@ -2796,7 +2796,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
-		if (migratetype == MIGRATE_MOVABLE)
+		if (alloc_flags & ALLOC_CMA)
 			page = __rmqueue_cma_fallback(zone, order);
 
 		if (!page && __rmqueue_fallback(zone, order, migratetype,
@@ -3687,6 +3687,20 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	return alloc_flags;
 }
 
+static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
+					unsigned int alloc_flags)
+{
+#ifdef CONFIG_CMA
+	unsigned int pflags = current->flags;
+
+	if (!(pflags & PF_MEMALLOC_NOCMA) &&
+			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+
+#endif
+	return alloc_flags;
+}
+
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -4333,10 +4347,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
+	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+
 	return alloc_flags;
 }
 
@@ -4637,7 +4649,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
 	if (reserve_flags)
-		alloc_flags = reserve_flags;
+		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
 
 	/*
 	 * Reset the nodemask and zonelist iterators if memory policies can be
@@ -4714,7 +4726,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (tsk_is_oom_victim(current) &&
-	    (alloc_flags == ALLOC_OOM ||
+	    (alloc_flags & ALLOC_OOM ||
 	     (gfp_mask & __GFP_NOMEMALLOC)))
 		goto nopage;
 
@@ -4806,8 +4818,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
+	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
 
 	return true;
 }
