@@ -265,6 +265,32 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
 		queue_work(sched->submit_wq, &sched->work_run_job);
 }
 
+/**
+ * drm_sched_free_job_queue - enqueue free-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched)
+{
+	if (!READ_ONCE(sched->pause_submit))
+		queue_work(sched->submit_wq, &sched->work_free_job);
+}
+
+/**
+ * drm_sched_free_job_queue_if_done - enqueue free-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_free_job_queue_if_done(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *job;
+
+	spin_lock(&sched->job_list_lock);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
+	if (job && dma_fence_is_signaled(&job->s_fence->finished))
+		drm_sched_free_job_queue(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
@@ -284,7 +310,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
-	drm_sched_run_job_queue(sched);
+	drm_sched_free_job_queue(sched);
 }
 
 /**
@@ -943,8 +969,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 					 typeof(*next), list);
 
 		if (next) {
-			next->s_fence->scheduled.timestamp =
-				dma_fence_timestamp(&job->s_fence->finished);
+			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+				     &next->s_fence->scheduled.flags))
+				next->s_fence->scheduled.timestamp =
+					dma_fence_timestamp(&job->s_fence->finished);
 			/* start TO timer for next job */
 			drm_sched_start_timeout(sched);
 		}
@@ -994,7 +1022,40 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 EXPORT_SYMBOL(drm_sched_pick_best);
 
 /**
- * drm_sched_run_job_work - main scheduler thread
+ * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
+{
+	if (drm_sched_select_entity(sched))
+		drm_sched_run_job_queue(sched);
+}
+
+/**
+ * drm_sched_free_job_work - worker to call free_job
+ *
+ * @w: free job work
+ */
+static void drm_sched_free_job_work(struct work_struct *w)
+{
+	struct drm_gpu_scheduler *sched =
+		container_of(w, struct drm_gpu_scheduler, work_free_job);
+	struct drm_sched_job *cleanup_job;
+
+	if (READ_ONCE(sched->pause_submit))
+		return;
+
+	cleanup_job = drm_sched_get_cleanup_job(sched);
+	if (cleanup_job) {
+		sched->ops->free_job(cleanup_job);
+
+		drm_sched_free_job_queue_if_done(sched);
+		drm_sched_run_job_queue_if_ready(sched);
+	}
+}
+
+/**
+ * drm_sched_run_job_work - worker to call run_job
  *
  * @w: run job work
  */
@@ -1003,65 +1064,51 @@ static void drm_sched_run_job_work(struct work_struct *w)
 	struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_run_job);
 	struct drm_sched_entity *entity;
-	struct drm_sched_job *cleanup_job;
+	struct dma_fence *fence;
+	struct drm_sched_fence *s_fence;
+	struct drm_sched_job *sched_job;
 	int r;
 
 	if (READ_ONCE(sched->pause_submit))
 		return;
 
-	cleanup_job = drm_sched_get_cleanup_job(sched);
 	entity = drm_sched_select_entity(sched);
+	if (!entity)
+		return;
 
-	if (!entity && !cleanup_job)
+	sched_job = drm_sched_entity_pop_job(entity);
+	if (!sched_job) {
+		complete_all(&entity->entity_idle);
 		return;	/* No more work */
+	}
 
-	if (cleanup_job)
-		sched->ops->free_job(cleanup_job);
-
-	if (entity) {
-		struct dma_fence *fence;
-		struct drm_sched_fence *s_fence;
-		struct drm_sched_job *sched_job;
-
-		sched_job = drm_sched_entity_pop_job(entity);
-		if (!sched_job) {
-			complete_all(&entity->entity_idle);
-			if (!cleanup_job)
-				return;	/* No more work */
-			goto again;
-		}
-
-		s_fence = sched_job->s_fence;
-
-		atomic_inc(&sched->hw_rq_count);
-		drm_sched_job_begin(sched_job);
+	s_fence = sched_job->s_fence;
 
-		trace_drm_run_job(sched_job, entity);
-		fence = sched->ops->run_job(sched_job);
-		complete_all(&entity->entity_idle);
-		drm_sched_fence_scheduled(s_fence, fence);
+	atomic_inc(&sched->hw_rq_count);
+	drm_sched_job_begin(sched_job);
 
-		if (!IS_ERR_OR_NULL(fence)) {
-			/* Drop for original kref_init of the fence */
-			dma_fence_put(fence);
+	trace_drm_run_job(sched_job, entity);
+	fence = sched->ops->run_job(sched_job);
+	complete_all(&entity->entity_idle);
+	drm_sched_fence_scheduled(s_fence, fence);
 
-			r = dma_fence_add_callback(fence, &sched_job->cb,
-						   drm_sched_job_done_cb);
-			if (r == -ENOENT)
-				drm_sched_job_done(sched_job, fence->error);
-			else if (r)
-				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
-					  r);
-		} else {
-			drm_sched_job_done(sched_job, IS_ERR(fence) ?
-					   PTR_ERR(fence) : 0);
-		}
+	if (!IS_ERR_OR_NULL(fence)) {
+		/* Drop for original kref_init of the fence */
+		dma_fence_put(fence);
 
-		wake_up(&sched->job_scheduled);
+		r = dma_fence_add_callback(fence, &sched_job->cb,
+					   drm_sched_job_done_cb);
+		if (r == -ENOENT)
+			drm_sched_job_done(sched_job, fence->error);
+		else if (r)
+			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+	} else {
+		drm_sched_job_done(sched_job, IS_ERR(fence) ?
+				   PTR_ERR(fence) : 0);
 	}
 
-again:
-	drm_sched_run_job_queue(sched);
+	wake_up(&sched->job_scheduled);
+	drm_sched_run_job_queue_if_ready(sched);
 }
 
 /**
@@ -1145,6 +1192,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
 	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
+	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
 	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 	sched->pause_submit = false;
@@ -1274,6 +1322,7 @@ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
 {
 	WRITE_ONCE(sched->pause_submit, true);
 	cancel_work_sync(&sched->work_run_job);
+	cancel_work_sync(&sched->work_free_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_stop);
 
@@ -1286,5 +1335,6 @@ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
 {
 	WRITE_ONCE(sched->pause_submit, false);
 	queue_work(sched->submit_wq, &sched->work_run_job);
+	queue_work(sched->submit_wq, &sched->work_free_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_start);
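
For context, a minimal sketch (not part of the patch) of the pattern this change adopts: two separate work items, one that runs jobs and one that frees finished jobs, queued on the same workqueue and started/paused together. It uses only generic workqueue APIs; the toy_* names and the ordered-workqueue choice are illustrative assumptions, not the DRM scheduler's own code.

/* Illustrative toy module: split run/free work items on one workqueue.
 * All toy_* identifiers are made up for this sketch.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>

struct toy_sched {
	struct workqueue_struct *submit_wq;	/* shared submit workqueue */
	struct work_struct work_run_job;	/* runs pending jobs */
	struct work_struct work_free_job;	/* frees finished jobs */
	bool pause_submit;
};

static struct toy_sched toy;

static void toy_run_job_work(struct work_struct *w)
{
	struct toy_sched *s = container_of(w, struct toy_sched, work_run_job);

	if (READ_ONCE(s->pause_submit))
		return;
	pr_info("toy: run one job\n");
	/* a real scheduler re-queues itself while more work is ready */
}

static void toy_free_job_work(struct work_struct *w)
{
	struct toy_sched *s = container_of(w, struct toy_sched, work_free_job);

	if (READ_ONCE(s->pause_submit))
		return;
	pr_info("toy: free one finished job\n");
}

static int __init toy_init(void)
{
	/* ordered workqueue: the two work items never run concurrently */
	toy.submit_wq = alloc_ordered_workqueue("toy_submit", 0);
	if (!toy.submit_wq)
		return -ENOMEM;

	INIT_WORK(&toy.work_run_job, toy_run_job_work);
	INIT_WORK(&toy.work_free_job, toy_free_job_work);
	toy.pause_submit = false;

	queue_work(toy.submit_wq, &toy.work_run_job);
	queue_work(toy.submit_wq, &toy.work_free_job);
	return 0;
}

static void __exit toy_exit(void)
{
	/* mirror of the wqueue_stop path: pause, then flush both items */
	WRITE_ONCE(toy.pause_submit, true);
	cancel_work_sync(&toy.work_run_job);
	cancel_work_sync(&toy.work_free_job);
	destroy_workqueue(toy.submit_wq);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");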