@@ -1,4 +1,5 @@
 import pytest
+import json
 from datetime import datetime, timezone
 from label_studio_sdk.label_interface.interface import LabelInterface
 from label_studio_sdk.label_interface.control_tags import ControlTag
@@ -200,7 +201,128 @@ def test_to_json_schema(config, expected_json_schema, input_arg, expected_result
     json_schema = interface.to_json_schema()
     assert json_schema == expected_json_schema
 
-    # convert JSON Schema to Pydantic
     with json_schema_to_pydantic(json_schema) as ResponseModel:
         instance = ResponseModel(**input_arg)
-        assert instance.model_dump() == expected_result
+        assert instance.model_dump() == expected_result
+
+
+
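+# Worker for the multiprocessing test below; kept at module level so the
+# "spawn" start method (macOS/Windows default) can pickle the Process target.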
+def process_json_schema(json_schema, input_arg, queue):
+    with json_schema_to_pydantic(json_schema) as ResponseModel:
+        instance = ResponseModel(**input_arg)
+        queue.put(instance.model_dump())
+
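+# Build models from the same schema in two separate OS processes to check
+# that json_schema_to_pydantic behaves correctly under concurrent use.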
+def test_concurrent_json_schema_to_pydantic():
+    import multiprocessing
+    json_schema = {
+        "type": "object",
+        "properties": {
+            "sentiment": {
+                "type": "string",
+                "description": "Choices for doc",
+                "enum": ["Positive", "Negative", "Neutral"],
+            }
+        },
+        "required": ["sentiment"]
+    }
+    input_arg1 = {"sentiment": "Positive"}
+    input_arg2 = {"sentiment": "Negative"}
+
+    queue = multiprocessing.Queue()
+
+    p1 = multiprocessing.Process(target=process_json_schema, args=(json_schema, input_arg1, queue))
+    p2 = multiprocessing.Process(target=process_json_schema, args=(json_schema, input_arg2, queue))
+
+    p1.start()
+    p2.start()
+
+    p1.join()
+    p2.join()
+
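+    # Each worker put exactly one model_dump() on the queue, so two gets drain it.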
+    results = [queue.get() for _ in range(2)]
+
+    assert {"sentiment": "Positive"} in results
+    assert {"sentiment": "Negative"} in results
+    assert len(results) == 2
+
+
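+# Threaded worker: each thread writes only to its own index in the shared
+# results list, so the disjoint slot assignments need no lock under the GIL.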
+def process_json_schema_threaded(json_schema, input_arg, results, index):
+    with json_schema_to_pydantic(json_schema) as ResponseModel:
+        instance = ResponseModel(**input_arg)
+        results[index] = instance.model_dump()
+
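+# Same check with threads in a single process: concurrent uses of the
+# json_schema_to_pydantic context manager must not clobber one another.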
+def test_concurrent_json_schema_to_pydantic_threaded():
+    import threading
+    import time
+
+    json_schema = {
+        "type": "object",
+        "properties": {
+            "sentiment": {
+                "type": "string",
+                "description": "Choices for doc",
+                "enum": ["Positive", "Negative", "Neutral"],
+            }
+        },
+        "required": ["sentiment"]
+    }
+    input_args = [
+        {"sentiment": "Positive"},
+        {"sentiment": "Negative"},
+        {"sentiment": "Neutral"},
+        {"sentiment": "Positive"}
+    ]
+
+    results = [None] * len(input_args)
+    threads = []
+
+    # Create and start threads
+    for i, input_arg in enumerate(input_args):
+        thread = threading.Thread(target=process_json_schema_threaded, args=(json_schema, input_arg, results, i))
+        threads.append(thread)
+        thread.start()
+
+    # Wait for all threads to complete
+    for thread in threads:
+        thread.join()
+
+    # Verify results
+    assert {"sentiment": "Positive"} in results
+    assert {"sentiment": "Negative"} in results
+    assert {"sentiment": "Neutral"} in results
+    assert results.count({"sentiment": "Positive"}) == 2
+    assert len(results) == 4
+    assert None not in results
+
+    # Verify thread safety by running multiple times
+    for _ in range(10):
+        results = [None] * len(input_args)
+        threads = []
+
+        start_time = time.time()
+        for i, input_arg in enumerate(input_args):
+            thread = threading.Thread(target=process_json_schema_threaded, args=(json_schema, input_arg, results, i))
+            threads.append(thread)
+            thread.start()
+
+        for thread in threads:
+            thread.join()
+
+        end_time = time.time()
+
+        assert {result["sentiment"] for result in results} == {"Positive", "Negative", "Neutral"}
+        assert results.count({"sentiment": "Positive"}) == 2
+        assert len(results) == 4
+        assert None not in results
+
+        # Check if execution time is reasonable (adjust as needed)
+        assert end_time - start_time < 1.0, f"Execution took too long: {end_time - start_time} seconds"