/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <memory>
#include <string>
#include <vector>

#include <gflags/gflags.h>

#include <executorch/extension/module/module.h>
#include <executorch/extension/tensor/tensor.h>

DEFINE_string(model_path, "", "Path to .pte file");
DEFINE_string(data_path, "", "Path to .ptd file (for CUDA delegate)");
DEFINE_string(input_dir, "", "Directory with input .bin files");
DEFINE_string(output_dir, "", "Directory to write output .bin files");
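// Example invocation (binary and file names here are illustrative):
//   ./runner --model_path=model.pte --data_path=model.ptd \
//       --input_dir=inputs/ --output_dir=outputs/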

using ::executorch::extension::from_blob;
using ::executorch::extension::Module;
using ::executorch::runtime::Error;
using ::executorch::runtime::EValue;

static std::vector<char> read_file(const std::string& path) {
  std::ifstream f(path, std::ios::binary | std::ios::ate);
  if (!f) {
    fprintf(stderr, "Cannot open %s\n", path.c_str());
    exit(1);
  }
  std::size_t size = static_cast<std::size_t>(f.tellg());
  f.seekg(0);
  std::vector<char> buf(size);
  if (!f.read(buf.data(), static_cast<std::streamsize>(size))) {
    fprintf(stderr, "Failed to read %s\n", path.c_str());
    exit(1);
  }
  return buf;
}

static void write_file(const std::string& path, const void* data, size_t len) {
  std::ofstream f(path, std::ios::binary);
  if (!f) {
    fprintf(stderr, "Cannot write %s\n", path.c_str());
    exit(1);
  }
  f.write(static_cast<const char*>(data), len);
}

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_model_path.empty()) {
    fprintf(stderr, "Error: --model_path required\n");
    return 1;
  }

  // When a separate .ptd data file is provided (e.g. for the CUDA delegate),
  // pass it to Module as the external data path.
  std::unique_ptr<Module> module;
  if (!FLAGS_data_path.empty()) {
    module = std::make_unique<Module>(
        FLAGS_model_path,
        FLAGS_data_path,
        Module::LoadMode::MmapUseMlockIgnoreErrors);
  } else {
    module = std::make_unique<Module>(
        FLAGS_model_path, Module::LoadMode::MmapUseMlockIgnoreErrors);
  }

  auto load_err = module->load();
  if (load_err != Error::Ok) {
    fprintf(stderr, "Failed to load model: 0x%x\n", static_cast<int>(load_err));
    return 1;
  }

  // Fixed test dimensions: B = batch, T = sequence length, H = heads,
  // K = key head dim, V = value head dim (names inferred from the inputs).
  constexpr int B = 1, T = 128, H = 4, K = 64, V = 64;

  std::vector<EValue> inputs;

  if (!FLAGS_input_dir.empty()) {
    // Load inputs from binary files
    struct TensorSpec {
      const char* name;
      std::vector<exec_aten::SizesType> shape;
      exec_aten::ScalarType dtype;
    };
    TensorSpec specs[] = {
        {"q", {B, T, H, K}, exec_aten::ScalarType::BFloat16},
        {"k", {B, T, H, K}, exec_aten::ScalarType::BFloat16},
        {"v", {B, T, H, V}, exec_aten::ScalarType::BFloat16},
        {"g", {B, T, H}, exec_aten::ScalarType::BFloat16},
        {"beta", {B, T, H}, exec_aten::ScalarType::BFloat16},
        {"initial_state", {B, H, K, V}, exec_aten::ScalarType::BFloat16},
    };
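    // With these dimensions, each raw bfloat16 input file is expected to be:
    //   q.bin / k.bin / v.bin: 1*128*4*64*2 = 65536 bytes
    //   g.bin / beta.bin:      1*128*4*2    = 1024 bytes
    //   initial_state.bin:     1*4*64*64*2  = 32768 bytes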

    // Keep data and TensorPtrs alive for the duration of execution
    static std::vector<std::vector<char>> input_bufs;
    static std::vector<executorch::extension::TensorPtr> input_tensors;
    input_bufs.resize(6);
    input_tensors.clear();

    for (int i = 0; i < 6; i++) {
      std::string path = FLAGS_input_dir + "/" + specs[i].name + ".bin";
      input_bufs[i] = read_file(path);
      input_tensors.push_back(
          from_blob(input_bufs[i].data(), specs[i].shape, specs[i].dtype));
      inputs.push_back(*input_tensors.back());
    }
  } else {
    // Generate deterministic test inputs
    auto to_bf16 = [](float f) -> uint16_t {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(float));
      return static_cast<uint16_t>(bits >> 16);
    };
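    // Note: taking the high 16 bits truncates the float32 mantissa instead of
    // rounding to nearest-even; that is fine for deterministic test data.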

    static std::vector<uint16_t> qk_data(B * T * H * K);
    for (size_t i = 0; i < qk_data.size(); i++)
      qk_data[i] = to_bf16(static_cast<float>(i % 100) * 0.01f - 0.5f);
    static auto v_data = std::vector<uint16_t>(qk_data.begin(), qk_data.end());
    static std::vector<uint16_t> g_data(B * T * H, to_bf16(-0.5f));
    static std::vector<uint16_t> beta_data(B * T * H, to_bf16(0.5f));
    static std::vector<uint16_t> state_data(B * H * K * V, to_bf16(0.0f));

    static std::vector<executorch::extension::TensorPtr> default_tensors;
    default_tensors.clear();
    default_tensors.push_back(from_blob(
        qk_data.data(), {B, T, H, K}, exec_aten::ScalarType::BFloat16));
    default_tensors.push_back(from_blob(
        qk_data.data(), {B, T, H, K}, exec_aten::ScalarType::BFloat16));
    default_tensors.push_back(from_blob(
        v_data.data(), {B, T, H, V}, exec_aten::ScalarType::BFloat16));
    default_tensors.push_back(
        from_blob(g_data.data(), {B, T, H}, exec_aten::ScalarType::BFloat16));
    default_tensors.push_back(from_blob(
        beta_data.data(), {B, T, H}, exec_aten::ScalarType::BFloat16));
    default_tensors.push_back(from_blob(
        state_data.data(), {B, H, K, V}, exec_aten::ScalarType::BFloat16));
    for (auto& t : default_tensors)
      inputs.push_back(*t);
  }

  auto result = module->execute("forward", inputs);
  if (!result.ok()) {
    fprintf(stderr, "Forward failed: 0x%x\n", static_cast<int>(result.error()));
    return 1;
  }

  auto outputs = result.get();
  for (size_t i = 0; i < outputs.size(); i++) {
    if (!outputs[i].isTensor())
      continue;
    const auto& t = outputs[i].toTensor();
    printf("Output %zu: [", i);
    for (int d = 0; d < t.dim(); d++)
      printf("%d%s", static_cast<int>(t.size(d)), d < t.dim() - 1 ? "," : "");
    printf("] dtype=%d\n", static_cast<int>(t.scalar_type()));

    if (!FLAGS_output_dir.empty()) {
      // Output tensors are on host memory (CUDA delegate copies back to CPU)
      std::string path =
          FLAGS_output_dir + "/output_" + std::to_string(i) + ".bin";
      write_file(path, t.const_data_ptr(), t.nbytes());
      printf("  Saved to %s (%zu bytes)\n", path.c_str(), (size_t)t.nbytes());
    }
  }

  printf("SUCCESS\n");
  return 0;
}