Skip to content

Commit f4423f4

Browse files
committed
vad : rename lstm weights for consistency
1 parent c938655 commit f4423f4

File tree

1 file changed

+20
-20
lines changed

src/whisper.cpp

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -4388,10 +4388,10 @@ struct whisper_vad_model {
43884388
struct ggml_tensor * encoder_3_bias; // [128]
43894389

43904390
// LSTM decoder tensors
4391-
struct ggml_tensor * lstm_weight_ih; // [512, 128] input-to-hidden
4392-
struct ggml_tensor * lstm_bias_ih; // [512]
4393-
struct ggml_tensor * lstm_weight_hh; // [512, 128] hidden-to-hidden
4394-
struct ggml_tensor * lstm_bias_hh; // [512]
4391+
struct ggml_tensor * lstm_ih_weight; // [512, 128] input-to-hidden
4392+
struct ggml_tensor * lstm_ih_bias; // [512]
4393+
struct ggml_tensor * lstm_hh_weight; // [512, 128] hidden-to-hidden
4394+
struct ggml_tensor * lstm_hh_bias; // [512]
43954395

43964396
// Final conv layer
43974397
struct ggml_tensor * final_conv_weight; // [1, 128, 1]
@@ -4536,12 +4536,12 @@ static ggml_tensor * whisper_vad_build_lstm_layer(ggml_context* ctx0,
45364536
ggml_set_input(c_in);
45374537

45384538
// Create operations using the input-to-hidden weights.
4539-
struct ggml_tensor * inp_gate = ggml_mul_mat(ctx0, model.lstm_weight_ih, x_t);
4540-
inp_gate = ggml_add(ctx0, inp_gate, model.lstm_bias_ih);
4539+
struct ggml_tensor * inp_gate = ggml_mul_mat(ctx0, model.lstm_ih_weight, x_t);
4540+
inp_gate = ggml_add(ctx0, inp_gate, model.lstm_ih_bias);
45414541

45424542
// Create operations using the hidden-to-hidden weights.
4543-
struct ggml_tensor * hid_gate = ggml_mul_mat(ctx0, model.lstm_weight_hh, h_in);
4544-
hid_gate = ggml_add(ctx0, hid_gate, model.lstm_bias_hh);
4543+
struct ggml_tensor * hid_gate = ggml_mul_mat(ctx0, model.lstm_hh_weight, h_in);
4544+
hid_gate = ggml_add(ctx0, hid_gate, model.lstm_hh_bias);
45454545

45464546
// Create add operation to get preactivations for all gates.
45474547
struct ggml_tensor * out_gate = ggml_add(ctx0, inp_gate, hid_gate);
@@ -4883,25 +4883,25 @@ whisper_vad_context * whisper_vad_init_from_file_with_params_no_state(
48834883
const int hstate_dim = hparams.lstm_hidden_size * 4;
48844884

48854885
// LSTM weights - input to hidden
4886-
model.lstm_weight_ih = create_tensor(
4886+
model.lstm_ih_weight = create_tensor(
48874887
VAD_TENSOR_LSTM_WEIGHT_IH,
48884888
ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hparams.lstm_hidden_size, hstate_dim)
48894889
);
48904890

48914891
// LSTM weights - hidden to hidden
4892-
model.lstm_weight_hh = create_tensor(
4892+
model.lstm_hh_weight = create_tensor(
48934893
VAD_TENSOR_LSTM_WEIGHT_HH,
48944894
ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hparams.lstm_hidden_size, hstate_dim)
48954895
);
48964896

48974897
// LSTM bias - input to hidden
4898-
model.lstm_bias_ih = create_tensor(
4898+
model.lstm_ih_bias = create_tensor(
48994899
VAD_TENSOR_LSTM_BIAS_IH,
49004900
ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hstate_dim)
49014901
);
49024902

49034903
// LSTM bias - hidden to hidden
4904-
model.lstm_bias_hh = create_tensor(
4904+
model.lstm_hh_bias = create_tensor(
49054905
VAD_TENSOR_LSTM_BIAS_HH,
49064906
ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hstate_dim)
49074907
);
@@ -5130,39 +5130,39 @@ whisper_vad_context * whisper_vad_init_from_file_with_params_no_state(
51305130

51315131
{
51325132
// Print as F32
5133-
struct ggml_tensor * tensor = model.lstm_weight_ih;
5133+
struct ggml_tensor * tensor = model.lstm_ih_weight;
51345134
std::vector<float> read_b(ggml_nbytes(tensor));
51355135
ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
51365136
for (int i = 0; i < 10; i++) {
5137-
WHISPER_LOG_INFO("%s: lstm_weight_ih: [%d]: %f\n", __func__, i, read_b[i]);
5137+
WHISPER_LOG_INFO("%s: lstm_ih_weight: [%d]: %f\n", __func__, i, read_b[i]);
51385138
}
51395139
}
51405140
{
51415141
// Print as F32
5142-
struct ggml_tensor * tensor = model.lstm_bias_ih;
5142+
struct ggml_tensor * tensor = model.lstm_ih_bias;
51435143
std::vector<float> read_b(ggml_nbytes(tensor));
51445144
ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
51455145
for (int i = 0; i < 10; i++) {
5146-
WHISPER_LOG_INFO("%s: lstm_bias_ih: [%d]: %f\n", __func__, i, read_b[i]);
5146+
WHISPER_LOG_INFO("%s: lstm_ih_bias: [%d]: %f\n", __func__, i, read_b[i]);
51475147
}
51485148
}
51495149

51505150
{
51515151
// Print as F32
5152-
struct ggml_tensor * tensor = model.lstm_weight_hh;
5152+
struct ggml_tensor * tensor = model.lstm_hh_weight;
51535153
std::vector<float> read_b(ggml_nbytes(tensor));
51545154
ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
51555155
for (int i = 0; i < 10; i++) {
5156-
WHISPER_LOG_INFO("%s: lstm_weight_hh: [%d]: %f\n", __func__, i, read_b[i]);
5156+
WHISPER_LOG_INFO("%s: lstm_hh_weight: [%d]: %f\n", __func__, i, read_b[i]);
51575157
}
51585158
}
51595159
{
51605160
// Print as F32
5161-
struct ggml_tensor * tensor = model.lstm_bias_hh;
5161+
struct ggml_tensor * tensor = model.lstm_hh_bias;
51625162
std::vector<float> read_b(ggml_nbytes(tensor));
51635163
ggml_backend_tensor_get(tensor, read_b.data(), 0, ggml_nbytes(tensor));
51645164
for (int i = 0; i < 10; i++) {
5165-
WHISPER_LOG_INFO("%s: lstm_bias_hh: [%d]: %f\n", __func__, i, read_b[i]);
5165+
WHISPER_LOG_INFO("%s: lstm_hh_bias: [%d]: %f\n", __func__, i, read_b[i]);
51665166
}
51675167
}
51685168

0 commit comments

Comments (0)