Skip to content

Commit 7c70cb9

Browse files
Author: Peter Zijlstra (committed)

sched: Add Lazy preemption model
Change fair to use resched_curr_lazy(), which, when the lazy preemption model is selected, will set TIF_NEED_RESCHED_LAZY. This LAZY bit will be promoted to the full NEED_RESCHED bit on tick. As such, the average delay between setting LAZY and actually rescheduling will be TICK_NSEC/2.

In short, Lazy preemption will delay preemption for the fair class but will function as Full preemption for all the other classes, most notably the realtime (RR/FIFO/DEADLINE) classes.

The goal is to bridge the performance gap with Voluntary, such that we might eventually remove that option entirely.

Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Sebastian Andrzej Siewior <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 26baa1f commit 7c70cb9

File tree

6 files changed: +107 additions, -8 deletions

include/linux/preempt.h

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
486486
extern bool preempt_model_none(void);
487487
extern bool preempt_model_voluntary(void);
488488
extern bool preempt_model_full(void);
489+
extern bool preempt_model_lazy(void);
489490

490491
#else
491492

@@ -502,6 +503,11 @@ static inline bool preempt_model_full(void)
502503
return IS_ENABLED(CONFIG_PREEMPT);
503504
}
504505

506+
static inline bool preempt_model_lazy(void)
507+
{
508+
return IS_ENABLED(CONFIG_PREEMPT_LAZY);
509+
}
510+
505511
#endif
506512

507513
static inline bool preempt_model_rt(void)
@@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void)
519525
*/
520526
static inline bool preempt_model_preemptible(void)
521527
{
522-
return preempt_model_full() || preempt_model_rt();
528+
return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
523529
}
524530

525531
#endif /* __LINUX_PREEMPT_H */

kernel/Kconfig.preempt

Lines changed: 15 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -11,6 +11,9 @@ config PREEMPT_BUILD
1111
select PREEMPTION
1212
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
1313

14+
config ARCH_HAS_PREEMPT_LAZY
15+
bool
16+
1417
choice
1518
prompt "Preemption Model"
1619
default PREEMPT_NONE
@@ -67,6 +70,18 @@ config PREEMPT
6770
embedded system with latency requirements in the milliseconds
6871
range.
6972

73+
config PREEMPT_LAZY
74+
bool "Scheduler controlled preemption model"
75+
depends on !ARCH_NO_PREEMPT
76+
depends on ARCH_HAS_PREEMPT_LAZY
77+
select PREEMPT_BUILD
78+
help
79+
This option provides a scheduler driven preemption model that
80+
is fundamentally similar to full preemption, but is less
81+
eager to preempt SCHED_NORMAL tasks in an attempt to
82+
reduce lock holder preemption and recover some of the performance
83+
gains seen from using Voluntary preemption.
84+
7085
config PREEMPT_RT
7186
bool "Fully Preemptible Kernel (Real-Time)"
7287
depends on EXPERT && ARCH_SUPPORTS_RT

kernel/sched/core.c

Lines changed: 78 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1083,6 +1083,13 @@ static void __resched_curr(struct rq *rq, int tif)
10831083

10841084
lockdep_assert_rq_held(rq);
10851085

1086+
/*
1087+
* Always immediately preempt the idle task; no point in delaying doing
1088+
* actual work.
1089+
*/
1090+
if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
1091+
tif = TIF_NEED_RESCHED;
1092+
10861093
if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
10871094
return;
10881095

@@ -1108,6 +1115,32 @@ void resched_curr(struct rq *rq)
11081115
__resched_curr(rq, TIF_NEED_RESCHED);
11091116
}
11101117

1118+
#ifdef CONFIG_PREEMPT_DYNAMIC
1119+
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
1120+
static __always_inline bool dynamic_preempt_lazy(void)
1121+
{
1122+
return static_branch_unlikely(&sk_dynamic_preempt_lazy);
1123+
}
1124+
#else
1125+
static __always_inline bool dynamic_preempt_lazy(void)
1126+
{
1127+
return IS_ENABLED(CONFIG_PREEMPT_LAZY);
1128+
}
1129+
#endif
1130+
1131+
static __always_inline int get_lazy_tif_bit(void)
1132+
{
1133+
if (dynamic_preempt_lazy())
1134+
return TIF_NEED_RESCHED_LAZY;
1135+
1136+
return TIF_NEED_RESCHED;
1137+
}
1138+
1139+
void resched_curr_lazy(struct rq *rq)
1140+
{
1141+
__resched_curr(rq, get_lazy_tif_bit());
1142+
}
1143+
11111144
void resched_cpu(int cpu)
11121145
{
11131146
struct rq *rq = cpu_rq(cpu);
@@ -5612,6 +5645,10 @@ void sched_tick(void)
56125645
update_rq_clock(rq);
56135646
hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
56145647
update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5648+
5649+
if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5650+
resched_curr(rq);
5651+
56155652
donor->sched_class->task_tick(rq, donor, 0);
56165653
if (sched_feat(LATENCY_WARN))
56175654
resched_latency = cpu_resched_latency(rq);
@@ -7374,27 +7411,39 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
73747411
* preempt_schedule <- NOP
73757412
* preempt_schedule_notrace <- NOP
73767413
* irqentry_exit_cond_resched <- NOP
7414+
* dynamic_preempt_lazy <- false
73777415
*
73787416
* VOLUNTARY:
73797417
* cond_resched <- __cond_resched
73807418
* might_resched <- __cond_resched
73817419
* preempt_schedule <- NOP
73827420
* preempt_schedule_notrace <- NOP
73837421
* irqentry_exit_cond_resched <- NOP
7422+
* dynamic_preempt_lazy <- false
73847423
*
73857424
* FULL:
73867425
* cond_resched <- RET0
73877426
* might_resched <- RET0
73887427
* preempt_schedule <- preempt_schedule
73897428
* preempt_schedule_notrace <- preempt_schedule_notrace
73907429
* irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7430+
* dynamic_preempt_lazy <- false
7431+
*
7432+
* LAZY:
7433+
* cond_resched <- RET0
7434+
* might_resched <- RET0
7435+
* preempt_schedule <- preempt_schedule
7436+
* preempt_schedule_notrace <- preempt_schedule_notrace
7437+
* irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7438+
* dynamic_preempt_lazy <- true
73917439
*/
73927440

73937441
enum {
73947442
preempt_dynamic_undefined = -1,
73957443
preempt_dynamic_none,
73967444
preempt_dynamic_voluntary,
73977445
preempt_dynamic_full,
7446+
preempt_dynamic_lazy,
73987447
};
73997448

74007449
int preempt_dynamic_mode = preempt_dynamic_undefined;
@@ -7410,15 +7459,23 @@ int sched_dynamic_mode(const char *str)
74107459
if (!strcmp(str, "full"))
74117460
return preempt_dynamic_full;
74127461

7462+
#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7463+
if (!strcmp(str, "lazy"))
7464+
return preempt_dynamic_lazy;
7465+
#endif
7466+
74137467
return -EINVAL;
74147468
}
74157469

7470+
#define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7471+
#define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7472+
74167473
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
74177474
#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
74187475
#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
74197476
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7420-
#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
7421-
#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
7477+
#define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7478+
#define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
74227479
#else
74237480
#error "Unsupported PREEMPT_DYNAMIC mechanism"
74247481
#endif
@@ -7438,6 +7495,7 @@ static void __sched_dynamic_update(int mode)
74387495
preempt_dynamic_enable(preempt_schedule);
74397496
preempt_dynamic_enable(preempt_schedule_notrace);
74407497
preempt_dynamic_enable(irqentry_exit_cond_resched);
7498+
preempt_dynamic_key_disable(preempt_lazy);
74417499

74427500
switch (mode) {
74437501
case preempt_dynamic_none:
@@ -7447,6 +7505,7 @@ static void __sched_dynamic_update(int mode)
74477505
preempt_dynamic_disable(preempt_schedule);
74487506
preempt_dynamic_disable(preempt_schedule_notrace);
74497507
preempt_dynamic_disable(irqentry_exit_cond_resched);
7508+
preempt_dynamic_key_disable(preempt_lazy);
74507509
if (mode != preempt_dynamic_mode)
74517510
pr_info("Dynamic Preempt: none\n");
74527511
break;
@@ -7458,6 +7517,7 @@ static void __sched_dynamic_update(int mode)
74587517
preempt_dynamic_disable(preempt_schedule);
74597518
preempt_dynamic_disable(preempt_schedule_notrace);
74607519
preempt_dynamic_disable(irqentry_exit_cond_resched);
7520+
preempt_dynamic_key_disable(preempt_lazy);
74617521
if (mode != preempt_dynamic_mode)
74627522
pr_info("Dynamic Preempt: voluntary\n");
74637523
break;
@@ -7469,9 +7529,22 @@ static void __sched_dynamic_update(int mode)
74697529
preempt_dynamic_enable(preempt_schedule);
74707530
preempt_dynamic_enable(preempt_schedule_notrace);
74717531
preempt_dynamic_enable(irqentry_exit_cond_resched);
7532+
preempt_dynamic_key_disable(preempt_lazy);
74727533
if (mode != preempt_dynamic_mode)
74737534
pr_info("Dynamic Preempt: full\n");
74747535
break;
7536+
7537+
case preempt_dynamic_lazy:
7538+
if (!klp_override)
7539+
preempt_dynamic_disable(cond_resched);
7540+
preempt_dynamic_disable(might_resched);
7541+
preempt_dynamic_enable(preempt_schedule);
7542+
preempt_dynamic_enable(preempt_schedule_notrace);
7543+
preempt_dynamic_enable(irqentry_exit_cond_resched);
7544+
preempt_dynamic_key_enable(preempt_lazy);
7545+
if (mode != preempt_dynamic_mode)
7546+
pr_info("Dynamic Preempt: lazy\n");
7547+
break;
74757548
}
74767549

74777550
preempt_dynamic_mode = mode;
@@ -7534,6 +7607,8 @@ static void __init preempt_dynamic_init(void)
75347607
sched_dynamic_update(preempt_dynamic_none);
75357608
} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
75367609
sched_dynamic_update(preempt_dynamic_voluntary);
7610+
} else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7611+
sched_dynamic_update(preempt_dynamic_lazy);
75377612
} else {
75387613
/* Default static call setting, nothing to do */
75397614
WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
@@ -7554,6 +7629,7 @@ static void __init preempt_dynamic_init(void)
75547629
PREEMPT_MODEL_ACCESSOR(none);
75557630
PREEMPT_MODEL_ACCESSOR(voluntary);
75567631
PREEMPT_MODEL_ACCESSOR(full);
7632+
PREEMPT_MODEL_ACCESSOR(lazy);
75577633

75587634
#else /* !CONFIG_PREEMPT_DYNAMIC: */
75597635

kernel/sched/debug.c

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -245,11 +245,12 @@ static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
245245
static int sched_dynamic_show(struct seq_file *m, void *v)
246246
{
247247
static const char * preempt_modes[] = {
248-
"none", "voluntary", "full"
248+
"none", "voluntary", "full", "lazy",
249249
};
250+
int j = ARRAY_SIZE(preempt_modes) - !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);
250251
int i;
251252

252-
for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
253+
for (i = 0; i < j; i++) {
253254
if (preempt_dynamic_mode == i)
254255
seq_puts(m, "(");
255256
seq_puts(m, preempt_modes[i]);

kernel/sched/fair.c

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1251,7 +1251,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
12511251
return;
12521252

12531253
if (resched || did_preempt_short(cfs_rq, curr)) {
1254-
resched_curr(rq);
1254+
resched_curr_lazy(rq);
12551255
clear_buddies(cfs_rq, curr);
12561256
}
12571257
}
@@ -5677,7 +5677,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
56775677
* validating it and just reschedule.
56785678
*/
56795679
if (queued) {
5680-
resched_curr(rq_of(cfs_rq));
5680+
resched_curr_lazy(rq_of(cfs_rq));
56815681
return;
56825682
}
56835683
#endif
@@ -8829,7 +8829,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
88298829
return;
88308830

88318831
preempt:
8832-
resched_curr(rq);
8832+
resched_curr_lazy(rq);
88338833
}
88348834

88358835
static struct task_struct *pick_task_fair(struct rq *rq)

kernel/sched/sched.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2689,6 +2689,7 @@ extern void init_sched_rt_class(void);
26892689
extern void init_sched_fair_class(void);
26902690

26912691
extern void resched_curr(struct rq *rq);
2692+
extern void resched_curr_lazy(struct rq *rq);
26922693
extern void resched_cpu(int cpu);
26932694

26942695
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

0 commit comments

Comments (0)