// NOTE(review): presumably selects whether command-pool resets run on a
// background thread (see BackgroundCommandPoolVK) — confirm against the
// code that reads this flag; it is only declared here.
static bool kResetOnBackgroundThread = false;
CommandPoolVK::~CommandPoolVK () {
62
+ if (!pool_) {
63
+ return ;
64
+ }
65
+
62
66
auto const context = context_.lock ();
63
67
if (!context) {
64
68
return ;
@@ -84,6 +88,11 @@ vk::UniqueCommandBuffer CommandPoolVK::CreateCommandBuffer() {
84
88
return {};
85
89
}
86
90
91
+ Lock lock (pool_mutex_);
92
+ if (!pool_) {
93
+ return {};
94
+ }
95
+
87
96
auto const device = context->GetDevice ();
88
97
vk::CommandBufferAllocateInfo info;
89
98
info.setCommandPool (pool_.get ());
@@ -97,17 +106,39 @@ vk::UniqueCommandBuffer CommandPoolVK::CreateCommandBuffer() {
97
106
}
98
107
99
108
// Returns a command buffer to this pool so it can be recycled or freed
// alongside the pool.
//
// Serialized against Destroy() via |pool_mutex_|.
void CommandPoolVK::CollectCommandBuffer(vk::UniqueCommandBuffer&& buffer) {
  Lock lock(pool_mutex_);
  if (pool_) {
    collected_buffers_.push_back(std::move(buffer));
    return;
  }
  // The pool is gone. Destroying a command pool frees every buffer that was
  // allocated from it, so this handle is already invalid; abandon it instead
  // of letting the unique wrapper free it a second time.
  buffer.release();
}
106
118
119
+ void CommandPoolVK::Destroy () {
120
+ Lock lock (pool_mutex_);
121
+ pool_.reset ();
122
+
123
+ // When the command pool is destroyed, all of its command buffers are freed.
124
+ // Handles allocated from that pool are now invalid and must be discarded.
125
+ for (auto & buffer : collected_buffers_) {
126
+ buffer.release ();
127
+ }
128
+ collected_buffers_.clear ();
129
+ }
130
+
107
131
// Associates a resource with a thread and context.
//
// Each thread owns one CommandPoolMap, keyed by the context's hash (see
// CommandPoolRecyclerVK::Get, which inserts under strong_context->GetHash()).
using CommandPoolMap =
    std::unordered_map<uint64_t, std::shared_ptr<CommandPoolVK>>;
FML_THREAD_LOCAL fml::ThreadLocalUniquePtr<CommandPoolMap> tls_command_pool_map;

// Map each context to a list of all thread-local command pools associated
// with that context.
//
// Pools are held weakly so a thread exiting (and dropping its TLS map) does
// not keep the pool alive; DestroyThreadLocalPools skips expired entries.
static Mutex g_all_pools_map_mutex;
static std::unordered_map<const ContextVK*,
                          std::vector<std::weak_ptr<CommandPoolVK>>>
    g_all_pools_map IPLR_GUARDED_BY(g_all_pools_map_mutex);
111
142
112
143
// TODO(matanlurey): Return a status_or<> instead of nullptr when we have one.
113
144
std::shared_ptr<CommandPoolVK> CommandPoolRecyclerVK::Get () {
@@ -117,14 +148,13 @@ std::shared_ptr<CommandPoolVK> CommandPoolRecyclerVK::Get() {
117
148
}
118
149
119
150
// If there is a resource in used for this thread and context, return it.
120
- auto resources = resources_.get ();
121
- if (!resources) {
122
- resources = new CommandPoolMap ();
123
- resources_.reset (resources);
151
+ if (!tls_command_pool_map.get ()) {
152
+ tls_command_pool_map.reset (new CommandPoolMap ());
124
153
}
154
+ CommandPoolMap& pool_map = *tls_command_pool_map.get ();
125
155
auto const hash = strong_context->GetHash ();
126
- auto const it = resources-> find (hash);
127
- if (it != resources-> end ()) {
156
+ auto const it = pool_map. find (hash);
157
+ if (it != pool_map. end ()) {
128
158
return it->second ;
129
159
}
130
160
@@ -136,7 +166,13 @@ std::shared_ptr<CommandPoolVK> CommandPoolRecyclerVK::Get() {
136
166
137
167
auto const resource =
138
168
std::make_shared<CommandPoolVK>(std::move (*pool), context_);
139
- resources->emplace (hash, resource);
169
+ pool_map.emplace (hash, resource);
170
+
171
+ {
172
+ Lock all_pools_lock (g_all_pools_map_mutex);
173
+ g_all_pools_map[strong_context.get ()].push_back (resource);
174
+ }
175
+
140
176
return resource;
141
177
}
142
178
@@ -199,11 +235,34 @@ CommandPoolRecyclerVK::~CommandPoolRecyclerVK() {
199
235
}
200
236
201
237
void CommandPoolRecyclerVK::Dispose () {
202
- auto const resources = resources_.get ();
203
- if (!resources) {
204
- return ;
238
+ CommandPoolMap* pool_map = tls_command_pool_map.get ();
239
+ if (pool_map) {
240
+ pool_map->clear ();
241
+ }
242
+ }
243
+
244
+ void CommandPoolRecyclerVK::DestroyThreadLocalPools (const ContextVK* context) {
245
+ // Delete the context's entry in this thread's command pool map.
246
+ if (tls_command_pool_map.get ()) {
247
+ tls_command_pool_map.get ()->erase (context->GetHash ());
248
+ }
249
+
250
+ // Destroy all other thread-local CommandPoolVK instances associated with
251
+ // this context.
252
+ Lock all_pools_lock (g_all_pools_map_mutex);
253
+ auto found = g_all_pools_map.find (context);
254
+ if (found != g_all_pools_map.end ()) {
255
+ for (auto & weak_pool : found->second ) {
256
+ auto pool = weak_pool.lock ();
257
+ if (!pool) {
258
+ continue ;
259
+ }
260
+ // Delete all objects held by this pool. The destroyed pool will still
261
+ // remain in its thread's TLS map until that thread exits.
262
+ pool->Destroy ();
263
+ }
264
+ g_all_pools_map.erase (found);
205
265
}
206
- resources->clear ();
207
266
}
208
267
209
268
} // namespace impeller
0 commit comments