@@ -1083,6 +1083,13 @@ static void __resched_curr(struct rq *rq, int tif)
 
 	lockdep_assert_rq_held(rq);
 
+	/*
+	 * Always immediately preempt the idle task; no point in delaying doing
+	 * actual work.
+	 */
+	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
+		tif = TIF_NEED_RESCHED;
+
 	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
 		return;
 
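__resched_curr() takes the TIF bit to set as an argument: callers pass TIF_NEED_RESCHED for an immediate reschedule or TIF_NEED_RESCHED_LAZY to defer it. The early-exit mask means a lazy request is dropped when a full one is already pending, while a full request still goes through when only the lazy bit is set. A minimal userspace model of that bit logic (stand-in bit numbers and flags word, not kernel code):

```c
/*
 * Minimal userspace model of the bit logic above -- not kernel code.
 * Bit numbers and the flags word are stand-ins; the real thread_info
 * flags are per-architecture.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	3	/* stand-in bit numbers */
#define TIF_NEED_RESCHED_LAZY	4
#define _TIF_NEED_RESCHED	(1UL << TIF_NEED_RESCHED)

static unsigned long ti_flags;		/* models curr's thread_info->flags */

static void model_resched_curr(bool curr_is_idle, int tif)
{
	/* Idle gains nothing from deferral: upgrade lazy to immediate. */
	if (curr_is_idle && tif == TIF_NEED_RESCHED_LAZY)
		tif = TIF_NEED_RESCHED;

	/*
	 * Bail if the requested bit or the stronger TIF_NEED_RESCHED is
	 * already pending; note a pending lazy bit does NOT stop a full
	 * request from going through.
	 */
	if (ti_flags & ((1UL << tif) | _TIF_NEED_RESCHED))
		return;

	ti_flags |= 1UL << tif;
	printf("set bit %d, flags now %#lx\n", tif, ti_flags);
}

int main(void)
{
	model_resched_curr(false, TIF_NEED_RESCHED_LAZY); /* sets the lazy bit */
	model_resched_curr(false, TIF_NEED_RESCHED);      /* full still goes through */
	model_resched_curr(true,  TIF_NEED_RESCHED_LAZY); /* upgraded; already set */
	return 0;
}
```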
@@ -1108,6 +1115,32 @@ void resched_curr(struct rq *rq)
 	__resched_curr(rq, TIF_NEED_RESCHED);
 }
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
+static __always_inline bool dynamic_preempt_lazy(void)
+{
+	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
+}
+#else
+static __always_inline bool dynamic_preempt_lazy(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+#endif
+
+static __always_inline int get_lazy_tif_bit(void)
+{
+	if (dynamic_preempt_lazy())
+		return TIF_NEED_RESCHED_LAZY;
+
+	return TIF_NEED_RESCHED;
+}
+
+void resched_curr_lazy(struct rq *rq)
+{
+	__resched_curr(rq, get_lazy_tif_bit());
+}
+
 void resched_cpu(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
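With CONFIG_PREEMPT_DYNAMIC the lazy/full decision is a runtime static branch; otherwise it collapses to the compile-time CONFIG_PREEMPT_LAZY choice and get_lazy_tif_bit() folds to a constant. A sketch modelling the static key with a plain bool (illustrative only; the real key patches the branch in place, so the check is free when disabled):

```c
/*
 * Illustrative model only: a plain bool stands in for the
 * sk_dynamic_preempt_lazy static key.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	3	/* stand-in bit numbers */
#define TIF_NEED_RESCHED_LAZY	4

static bool preempt_lazy_enabled;	/* models sk_dynamic_preempt_lazy */

static int model_get_lazy_tif_bit(void)
{
	if (preempt_lazy_enabled)
		return TIF_NEED_RESCHED_LAZY;	/* defer to tick / user return */

	return TIF_NEED_RESCHED;		/* immediate, as today */
}

int main(void)
{
	printf("full mode: set bit %d\n", model_get_lazy_tif_bit());
	preempt_lazy_enabled = true;		/* as if switched to "lazy" */
	printf("lazy mode: set bit %d\n", model_get_lazy_tif_bit());
	return 0;
}
```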
@@ -5612,6 +5645,10 @@ void sched_tick(void)
 	update_rq_clock(rq);
 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
+
+	if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
+		resched_curr(rq);
+
 	donor->sched_class->task_tick(rq, donor, 0);
 	if (sched_feat(LATENCY_WARN))
 		resched_latency = cpu_resched_latency(rq);
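TIF_NEED_RESCHED_LAZY is only acted upon at return to user space, so a task that stays in the kernel could otherwise defer preemption indefinitely; the tick check above promotes a still-pending lazy request to a full resched, bounding the extra latency to roughly one tick. A toy model of that promotion (stand-in flags word, not kernel code):

```c
/*
 * Toy model: a lazy request still pending at the next scheduler tick is
 * promoted to a full TIF_NEED_RESCHED.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	3
#define TIF_NEED_RESCHED_LAZY	4

static unsigned long ti_flags = 1UL << TIF_NEED_RESCHED_LAZY; /* lazy pending */

static void model_sched_tick(bool lazy_mode)
{
	/* mirrors: if (dynamic_preempt_lazy() && tif_test_bit(...)) resched_curr(rq); */
	if (lazy_mode && (ti_flags & (1UL << TIF_NEED_RESCHED_LAZY)))
		ti_flags |= 1UL << TIF_NEED_RESCHED;	/* promote to full */
}

int main(void)
{
	model_sched_tick(true);
	printf("flags after tick: %#lx (full bit now set)\n", ti_flags);
	return 0;
}
```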
@@ -7374,27 +7411,39 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
  *   preempt_schedule           <- NOP
  *   preempt_schedule_notrace   <- NOP
  *   irqentry_exit_cond_resched <- NOP
+ *   dynamic_preempt_lazy       <- false
  *
  * VOLUNTARY:
  *   cond_resched               <- __cond_resched
  *   might_resched              <- __cond_resched
  *   preempt_schedule           <- NOP
  *   preempt_schedule_notrace   <- NOP
  *   irqentry_exit_cond_resched <- NOP
+ *   dynamic_preempt_lazy       <- false
  *
  * FULL:
  *   cond_resched               <- RET0
  *   might_resched              <- RET0
  *   preempt_schedule           <- preempt_schedule
  *   preempt_schedule_notrace   <- preempt_schedule_notrace
  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ *   dynamic_preempt_lazy       <- false
+ *
+ * LAZY:
+ *   cond_resched               <- RET0
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- preempt_schedule
+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ *   dynamic_preempt_lazy       <- true
  */
 
 enum {
 	preempt_dynamic_undefined = -1,
 	preempt_dynamic_none,
 	preempt_dynamic_voluntary,
 	preempt_dynamic_full,
+	preempt_dynamic_lazy,
 };
 
 int preempt_dynamic_mode = preempt_dynamic_undefined;
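The LAZY column matches FULL for every existing entry: cond_resched() and might_resched() are patched to stubs returning 0 and the preemption entry points stay enabled; the only difference is the new dynamic_preempt_lazy key. A function-pointer sketch of what "cond_resched <- RET0" means (the kernel uses static calls/keys rather than pointers, so the patched form has no indirect-call cost):

```c
/*
 * Function-pointer sketch of the table above -- illustrative only.
 * RET0 means "patched to a stub that returns 0".
 */
#include <stdio.h>

static int __cond_resched_model(void) { return 1; }	/* would yield the CPU */
static int ret0(void) { return 0; }

static int (*cond_resched_fp)(void) = __cond_resched_model; /* NONE/VOLUNTARY */

int main(void)
{
	printf("voluntary: cond_resched() = %d\n", cond_resched_fp());

	cond_resched_fp = ret0;		/* FULL and LAZY: cond_resched <- RET0 */
	printf("lazy:      cond_resched() = %d\n", cond_resched_fp());
	return 0;
}
```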
@@ -7410,15 +7459,23 @@ int sched_dynamic_mode(const char *str)
 	if (!strcmp(str, "full"))
 		return preempt_dynamic_full;
 
+#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
+	if (!strcmp(str, "lazy"))
+		return preempt_dynamic_lazy;
+#endif
+
 	return -EINVAL;
 }
 
+#define preempt_dynamic_key_enable(f)	static_key_enable(&sk_dynamic_##f.key)
+#define preempt_dynamic_key_disable(f)	static_key_disable(&sk_dynamic_##f.key)
+
 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
 #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
-#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
+#define preempt_dynamic_enable(f)	preempt_dynamic_key_enable(f)
+#define preempt_dynamic_disable(f)	preempt_dynamic_key_disable(f)
 #else
 #error "Unsupported PREEMPT_DYNAMIC mechanism"
 #endif
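Hoisting preempt_dynamic_key_enable()/_disable() out of the #if lets __sched_dynamic_update() flip the preempt_lazy static key on every architecture, whether preempt_dynamic_enable() itself is backed by static calls or static keys. A userspace demo of the `##` token pasting these macros rely on (fake_static_key is a stand-in type; the real static_key patches jump labels in kernel text):

```c
/*
 * Demo of the token pasting behind preempt_dynamic_key_enable(f):
 * the argument expands into the symbol name sk_dynamic_<f>.
 */
#include <stdio.h>

struct fake_static_key { int enabled; };	/* stand-in for struct static_key */

static struct fake_static_key sk_dynamic_preempt_lazy;

#define demo_key_enable(f)	(sk_dynamic_##f.enabled = 1)
#define demo_key_disable(f)	(sk_dynamic_##f.enabled = 0)

int main(void)
{
	demo_key_enable(preempt_lazy);	/* -> sk_dynamic_preempt_lazy.enabled = 1 */
	printf("preempt_lazy: %d\n", sk_dynamic_preempt_lazy.enabled);

	demo_key_disable(preempt_lazy);
	printf("preempt_lazy: %d\n", sk_dynamic_preempt_lazy.enabled);
	return 0;
}
```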
@@ -7438,6 +7495,7 @@ static void __sched_dynamic_update(int mode)
 	preempt_dynamic_enable(preempt_schedule);
 	preempt_dynamic_enable(preempt_schedule_notrace);
 	preempt_dynamic_enable(irqentry_exit_cond_resched);
+	preempt_dynamic_key_disable(preempt_lazy);
 
 	switch (mode) {
 	case preempt_dynamic_none:
@@ -7447,6 +7505,7 @@ static void __sched_dynamic_update(int mode)
 		preempt_dynamic_disable(preempt_schedule);
 		preempt_dynamic_disable(preempt_schedule_notrace);
 		preempt_dynamic_disable(irqentry_exit_cond_resched);
+		preempt_dynamic_key_disable(preempt_lazy);
 		if (mode != preempt_dynamic_mode)
 			pr_info("Dynamic Preempt: none\n");
 		break;
@@ -7458,6 +7517,7 @@ static void __sched_dynamic_update(int mode)
 		preempt_dynamic_disable(preempt_schedule);
 		preempt_dynamic_disable(preempt_schedule_notrace);
 		preempt_dynamic_disable(irqentry_exit_cond_resched);
+		preempt_dynamic_key_disable(preempt_lazy);
 		if (mode != preempt_dynamic_mode)
 			pr_info("Dynamic Preempt: voluntary\n");
 		break;
@@ -7469,9 +7529,22 @@ static void __sched_dynamic_update(int mode)
 		preempt_dynamic_enable(preempt_schedule);
 		preempt_dynamic_enable(preempt_schedule_notrace);
 		preempt_dynamic_enable(irqentry_exit_cond_resched);
+		preempt_dynamic_key_disable(preempt_lazy);
 		if (mode != preempt_dynamic_mode)
 			pr_info("Dynamic Preempt: full\n");
 		break;
+
+	case preempt_dynamic_lazy:
+		if (!klp_override)
+			preempt_dynamic_disable(cond_resched);
+		preempt_dynamic_disable(might_resched);
+		preempt_dynamic_enable(preempt_schedule);
+		preempt_dynamic_enable(preempt_schedule_notrace);
+		preempt_dynamic_enable(irqentry_exit_cond_resched);
+		preempt_dynamic_key_enable(preempt_lazy);
+		if (mode != preempt_dynamic_mode)
+			pr_info("Dynamic Preempt: lazy\n");
+		break;
 	}
 
 	preempt_dynamic_mode = mode;
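Each mode switch first re-enables everything in the preamble, then the case for the selected mode disables what it does not want, so the lazy key ends up set only for preempt_dynamic_lazy. At runtime the selection should be reachable through the existing PREEMPT_DYNAMIC debugfs knob; a hedged usage sketch (path assumed from that interface, requires an arch with ARCH_HAS_PREEMPT_LAZY, a mounted debugfs, and root):

```c
/*
 * Hypothetical switcher: writes "lazy" to the dynamic-preempt debugfs
 * file, which the kernel routes through sched_dynamic_mode("lazy").
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/sched/preempt", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("lazy", f);
	fclose(f);
	return 0;
}
```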
@@ -7534,6 +7607,8 @@ static void __init preempt_dynamic_init(void)
 		sched_dynamic_update(preempt_dynamic_none);
 	} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
 		sched_dynamic_update(preempt_dynamic_voluntary);
+	} else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
+		sched_dynamic_update(preempt_dynamic_lazy);
 	} else {
 		/* Default static call setting, nothing to do */
 		WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
@@ -7554,6 +7629,7 @@ static void __init preempt_dynamic_init(void)
 PREEMPT_MODEL_ACCESSOR(none);
 PREEMPT_MODEL_ACCESSOR(voluntary);
 PREEMPT_MODEL_ACCESSOR(full);
+PREEMPT_MODEL_ACCESSOR(lazy);
 
 #else /* !CONFIG_PREEMPT_DYNAMIC: */
 