"""
Unified VolView API for GTC 2025 Demo.

Combines Vista3D segmentation, multimodal LLM chat, and Clara Generate
(synthetic CT generation) features with modular, lazy loading for optimal performance.

Features can be controlled via .env file or environment variables:
- ENABLE_VISTA3D=true (default: true)
- ENABLE_LLM_CHAT=true (default: true)
- ENABLE_CLARA_GENERATE=true (default: true)
- VISTA3D_BUNDLE_DIR=path (default: "bundles/")
- CLARA_GENERATE_BUNDLE_DIR=path (default: "bundles/")

Configuration Priority:
1. Environment variables (highest)
# Feature flags (read from environment, default to true)
# Any of "true"/"1"/"yes" (case-insensitive) enables a feature.
_TRUTHY = ("true", "1", "yes")

ENABLE_VISTA3D = os.getenv("ENABLE_VISTA3D", "true").lower() in _TRUTHY
ENABLE_LLM_CHAT = os.getenv("ENABLE_LLM_CHAT", "true").lower() in _TRUTHY
ENABLE_CLARA_GENERATE = os.getenv("ENABLE_CLARA_GENERATE", "true").lower() in _TRUTHY

# Startup banner: show which features are active.
_BANNER = "=" * 80
print(_BANNER)
print("VolView Server - GTC 2025 Demo")
print(_BANNER)
print(f"ENABLE_VISTA3D: {ENABLE_VISTA3D}")
print(f"ENABLE_LLM_CHAT: {ENABLE_LLM_CHAT}")
print(f"ENABLE_CLARA_GENERATE: {ENABLE_CLARA_GENERATE}")
print(_BANNER)
5357# ============================================================================
@@ -390,6 +394,104 @@ async def multimodal_llm_analysis(
390394else :
391395 print ("✗ LLM Chat feature disabled" )
392396
# ============================================================================
# CLARA GENERATE FEATURE - Synthetic CT Image Generation
# ============================================================================

if ENABLE_CLARA_GENERATE:
    print("Loading Clara Generate feature...")

    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    from typing import Optional, Tuple

    # Import the inference function (lazy: only loaded when the feature is on)
    from volview_clara_generate_inference import run_clara_generate_inference

    # Process pool for CPU-intensive generation (separate from Vista3D).
    # max_workers=1 serializes generation requests so only one model instance
    # runs at a time.
    clara_generate_process_pool = ProcessPoolExecutor(max_workers=1)

    # Bundle configuration (overridable via environment / .env)
    CLARA_GENERATE_BUNDLE_DIR = os.getenv("CLARA_GENERATE_BUNDLE_DIR", "bundles/")

    def do_clara_generate_inference(
        output_size: list,
        spacing: list,
    ) -> Tuple[bytes, bytes]:
        """
        Runs Clara Generate (MAISI) inference in a separate process.

        Args:
            output_size: Volume size [x, y, z] in voxels.
            spacing: Voxel size [x, y, z] in mm.

        Returns:
            Tuple of (image_blob, mask_blob) as bytes.
        """
        return run_clara_generate_inference(
            output_size=output_size,
            spacing=spacing,
            bundle_dir=CLARA_GENERATE_BUNDLE_DIR,
        )

    async def run_clara_generate_process(
        output_size: list, spacing: list
    ) -> Tuple[bytes, bytes]:
        """
        Asynchronously runs Clara Generate inference in the process pool.

        Returns:
            Tuple of (image_blob, mask_blob) as bytes.
        """
        # get_running_loop() replaces the deprecated get_event_loop() inside
        # coroutines (deprecated since Python 3.10).
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(
            clara_generate_process_pool,
            do_clara_generate_inference,
            output_size,
            spacing,
        )
        return result

    @volview.expose("generateWithClara")
    async def generate_with_clara(
        output_size: Optional[list] = None,
        spacing: Optional[list] = None,
    ):
        """
        Exposes Clara Generate (MAISI) synthetic CT generation to the VolView client.

        Args:
            output_size: Volume size [x, y, z] (default: [256, 256, 128]).
                X,Y must be from [256, 384, 512].
                Z must be from [128, 256, 384, 512, 640, 768].
            spacing: Voxel size [x, y, z] in mm (default: [1.5, 1.5, 1.5]).

        Returns:
            dict: Contains 'image' and 'mask' as byte arrays (for JSON serialization).
        """
        # Set defaults here rather than in the signature (mutable defaults
        # must never appear as default argument values).
        if output_size is None:
            output_size = [256, 256, 128]
        if spacing is None:
            spacing = [1.5, 1.5, 1.5]

        print(
            f"Received Clara Generate request: size={output_size}, spacing={spacing}"
        )

        image_blob, mask_blob = await run_clara_generate_process(
            output_size, spacing
        )

        print(
            f"Successfully generated synthetic CT. "
            f"Returning {len(image_blob)} byte image and {len(mask_blob)} byte mask to client."
        )

        # Convert bytes to lists for JSON serialization over Socket.IO
        return {
            "image": list(image_blob),
            "mask": list(mask_blob),
        }

    print("✓ Clara Generate feature loaded (generateWithClara endpoint available)")

else:
    print("✗ Clara Generate feature disabled")
393495# ============================================================================
394496# Summary
395497# ============================================================================
@@ -400,10 +502,12 @@ async def multimodal_llm_analysis(
400502 available_endpoints .append ("segmentWithMONAI" )
# Collect the optional endpoints in a table-driven pass.
for _enabled, _endpoint in (
    (ENABLE_LLM_CHAT, "multimodalLlmAnalysis"),
    (ENABLE_CLARA_GENERATE, "generateWithClara"),
):
    if _enabled:
        available_endpoints.append(_endpoint)

# Report what the client can call, or warn loudly when everything is off.
if available_endpoints:
    print(f"Available RPC endpoints: {', '.join(available_endpoints)}")
else:
    print("WARNING: No features enabled! Set ENABLE_VISTA3D=true, ENABLE_LLM_CHAT=true, or ENABLE_CLARA_GENERATE=true")

print("=" * 80)
0 commit comments