 import data_utils.augmentor.trans_add_delta as trans_add_delta
 import data_utils.augmentor.trans_splice as trans_splice
 import data_utils.async_data_reader as reader
+import decoder.decoder as decoder
 from data_utils.util import lodtensor_to_ndarray
 from model_utils.model import stacked_lstmp_model
+from data_utils.util import split_infer_result


 def parse_args():
@@ -141,13 +143,20 @@ def infer_from_ckpt(args):

         infer_data_reader.recycle(features, labels, lod)

-        cost, acc = exe.run(infer_program,
-                            feed={"feature": feature_t,
-                                  "label": label_t},
-                            fetch_list=[avg_cost, accuracy],
-                            return_numpy=False)
-        infer_costs.append(lodtensor_to_ndarray(cost)[0])
-        infer_accs.append(lodtensor_to_ndarray(acc)[0])
+        results = exe.run(infer_program,
+                          feed={"feature": feature_t,
+                                "label": label_t},
+                          fetch_list=[prediction, avg_cost, accuracy],
+                          return_numpy=False)
+        infer_costs.append(lodtensor_to_ndarray(results[1])[0])
+        infer_accs.append(lodtensor_to_ndarray(results[2])[0])
+
+        probs, lod = lodtensor_to_ndarray(results[0])
+        infer_batch = split_infer_result(probs, lod)
+        for index, sample in enumerate(infer_batch):
+            print("Decoding %d: " % (batch_id * args.batch_size + index),
+                  decoder.decode(sample))
+
     print(np.mean(infer_costs), np.mean(infer_accs))


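For context, here is a minimal sketch of the per-utterance splitting that the new decoding loop relies on. The actual split_infer_result is imported from data_utils/util.py and is not part of this diff; the stand-in below is hypothetical and assumes lod is a flat list of cumulative frame offsets marking sequence boundaries (the LoD returned for a Fluid batch may be nested, in which case its first level would be used).

import numpy as np

def split_infer_result(probs, lod):
    # Hypothetical helper: slice the flattened (total_frames, num_classes)
    # posterior matrix into one ndarray per utterance, using the cumulative
    # frame offsets in `lod` as the sequence boundaries.
    return [probs[lod[i]:lod[i + 1]] for i in range(len(lod) - 1)]

# Dummy batch: two utterances of 3 and 2 frames over 10 output classes.
probs = np.random.rand(5, 10)
lod = [0, 3, 5]
for index, sample in enumerate(split_infer_result(probs, lod)):
    print("sample %d shape:" % index, sample.shape)  # (3, 10), then (2, 10)

Each resulting sample is what the loop in the diff then passes to decoder.decode(sample).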