@@ -5055,6 +5055,135 @@ whisper_vad_context * whisper_vad_init_from_file_with_params_no_state(
                 WHISPER_LOG_INFO("%s: encoder_0_bias: [%d]: %f\n", __func__, i, read_b[i]);
             }
         }
+
+        {
+            // Print as F16
+            struct ggml_tensor * tensor = model.encoder_1_weight;
+            std::vector<uint16_t> raw_data(ggml_nbytes(tensor)/sizeof(uint16_t));
+            ggml_backend_tensor_get(tensor, raw_data.data(), 0, ggml_nbytes(tensor));
+
+            // Convert first 10 values from F16 to F32 for display
+            for (int i = 0; i < 10; i++) {
+                float converted_value = ggml_fp16_to_fp32(raw_data[i]);
+                WHISPER_LOG_INFO("%s: model.encoder.1.reparam_conv: [%d]: %f (raw: 0x%04x)\n",
+                                 __func__, i, converted_value, raw_data[i]);
+            }
+        }
+
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.encoder_1_bias;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: encoder_1_bias: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+
+        {
+            // Print as F16
+            struct ggml_tensor * tensor = model.encoder_2_weight;
+            std::vector<uint16_t> raw_data(ggml_nbytes(tensor)/sizeof(uint16_t));
+            ggml_backend_tensor_get(tensor, raw_data.data(), 0, ggml_nbytes(tensor));
+
+            // Convert first 10 values from F16 to F32 for display
+            for (int i = 0; i < 10; i++) {
+                float converted_value = ggml_fp16_to_fp32(raw_data[i]);
+                WHISPER_LOG_INFO("%s: model.encoder.2.reparam_conv: [%d]: %f (raw: 0x%04x)\n",
+                                 __func__, i, converted_value, raw_data[i]);
+            }
+        }
+
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.encoder_2_bias;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: encoder_2_bias: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+
+        {
+            // Print as F16
+            struct ggml_tensor * tensor = model.encoder_3_weight;
+            std::vector<uint16_t> raw_data(ggml_nbytes(tensor)/sizeof(uint16_t));
+            ggml_backend_tensor_get(tensor, raw_data.data(), 0, ggml_nbytes(tensor));
+
+            // Convert first 10 values from F16 to F32 for display
+            for (int i = 0; i < 10; i++) {
+                float converted_value = ggml_fp16_to_fp32(raw_data[i]);
+                WHISPER_LOG_INFO("%s: model.encoder.3.reparam_conv: [%d]: %f (raw: 0x%04x)\n",
+                                 __func__, i, converted_value, raw_data[i]);
+            }
+        }
+
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.encoder_3_bias;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: encoder_3_bias: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.lstm_weight_ih;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: lstm_weight_ih: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.lstm_bias_ih;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: lstm_bias_ih: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.lstm_weight_hh;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: lstm_weight_hh: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.lstm_bias_hh;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: lstm_bias_hh: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.final_conv_weight;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: final_conv_weight: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
+        {
+            // Print as F32
+            struct ggml_tensor * tensor = model.final_conv_bias;
+            std::vector<float> read_b(ggml_nbytes(tensor)/sizeof(float));
+            ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
+            for (int i = 0; i < 10; i++) {
+                WHISPER_LOG_INFO("%s: final_conv_bias: [%d]: %f\n", __func__, i, read_b[i]);
+            }
+        }
     }
 
     return vctx;
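
The nine dump blocks added above all repeat the same ten-element pattern per tensor, so they could be collapsed into a single helper that branches on the tensor type. A minimal sketch, using only calls already present in the diff (ggml_nbytes, ggml_backend_tensor_get, ggml_fp16_to_fp32, WHISPER_LOG_INFO); the helper name whisper_vad_dump_tensor is hypothetical and not part of the commit:

static void whisper_vad_dump_tensor(const char * name, const struct ggml_tensor * tensor, int n) {
    const size_t nbytes = ggml_nbytes(tensor);
    if (tensor->type == GGML_TYPE_F16) {
        // F16 tensors: read the raw half-precision bits, convert each value
        // to F32 for display, and print the raw bits alongside.
        std::vector<uint16_t> raw_data(nbytes/sizeof(uint16_t));
        ggml_backend_tensor_get(tensor, raw_data.data(), 0, nbytes);
        for (int i = 0; i < n; i++) {
            WHISPER_LOG_INFO("%s: %s: [%d]: %f (raw: 0x%04x)\n",
                             __func__, name, i, ggml_fp16_to_fp32(raw_data[i]), raw_data[i]);
        }
    } else if (tensor->type == GGML_TYPE_F32) {
        // F32 tensors can be read and printed directly.
        std::vector<float> read_b(nbytes/sizeof(float));
        ggml_backend_tensor_get(tensor, read_b.data(), 0, nbytes);
        for (int i = 0; i < n; i++) {
            WHISPER_LOG_INFO("%s: %s: [%d]: %f\n", __func__, name, i, read_b[i]);
        }
    }
}

Each block would then reduce to a single call such as whisper_vad_dump_tensor("encoder_1_bias", model.encoder_1_bias, 10);.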