feat: Add deferred data uploading #1720
@@ -17,6 +17,7 @@
 import dataclasses
 import math
 import os
+import threading
 from typing import cast, Literal, Mapping, Optional, Sequence, Tuple, Union
 import warnings
 import weakref
@@ -28,15 +29,15 @@
 import google.cloud.bigquery_storage_v1

 import bigframes.core
-from bigframes.core import compile, rewrite
+from bigframes.core import compile, local_data, rewrite
 import bigframes.core.guid
 import bigframes.core.nodes as nodes
 import bigframes.core.ordering as order
 import bigframes.core.tree_properties as tree_properties
 import bigframes.dtypes
 import bigframes.exceptions as bfe
 import bigframes.features
-from bigframes.session import executor, local_scan_executor, read_api_execution
+from bigframes.session import executor, loader, local_scan_executor, read_api_execution
 import bigframes.session._io.bigquery as bq_io
 import bigframes.session.metrics
 import bigframes.session.planner
@@ -65,12 +66,19 @@ def _get_default_output_spec() -> OutputSpec:
 )


+SourceIdMapping = Mapping[str, str]
+
+
 class ExecutionCache:
     def __init__(self):
         # current assumption is only 1 cache of a given node
         # in future, might have multiple caches, with different layout, localities
         self._cached_executions: weakref.WeakKeyDictionary[
-            nodes.BigFrameNode, nodes.BigFrameNode
+            nodes.BigFrameNode, nodes.CachedTableNode
         ] = weakref.WeakKeyDictionary()
+        self._uploaded_local_data: weakref.WeakKeyDictionary[
+            local_data.ManagedArrowTable,
+            tuple[nodes.BigqueryDataSource, SourceIdMapping],
+        ] = weakref.WeakKeyDictionary()

     @property
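Both of these caches are weak-keyed, so an uploaded table or cached execution is forgotten as soon as the plan node or ManagedArrowTable it belongs to is garbage collected. A minimal sketch of that behavior, using a stand-in key class rather than the real bigframes node types:

```python
import weakref


class PlanKey:
    """Stand-in for a weak-referenceable cache key (e.g. a plan node or local table)."""


cache: "weakref.WeakKeyDictionary[PlanKey, str]" = weakref.WeakKeyDictionary()

key = PlanKey()
cache[key] = "uploaded-table-id"
assert len(cache) == 1

# Once the last strong reference to the key goes away, the cache entry is dropped
# automatically (promptly in CPython, where collection is reference-counted).
del key
assert len(cache) == 0
```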
@@ -103,6 +111,17 @@ def cache_results_table(
         assert original_root.schema == cached_replacement.schema
         self._cached_executions[original_root] = cached_replacement

+    def cache_remote_replacement(
+        self,
+        local_data: local_data.ManagedArrowTable,
+        bq_data: nodes.BigqueryDataSource,
+    ):
+        mapping = {
+            local_data.schema.items[i].column: bq_data.table.physical_schema[i].name
+            for i in range(len(local_data.schema))
+        }
+        self._uploaded_local_data[local_data] = (bq_data, mapping)
+

 class BigQueryCachingExecutor(executor.Executor):
     """Computes BigFrames values using BigQuery Engine.
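cache_remote_replacement records, for each uploaded local table, the resulting BigQuery data source together with a positional mapping from local column ids to the physical column names of the uploaded table. A rough illustration of that mapping, with hypothetical column names standing in for the real schema objects (which come from bigframes.core.local_data and the BigQuery table metadata):

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class LocalItem:
    column: str  # column id used by the in-memory (local) schema


@dataclass(frozen=True)
class PhysicalField:
    name: str  # column name in the uploaded BigQuery table


# Hypothetical schemas; the offsets column is assumed to sit last on both sides.
local_items = [LocalItem("city"), LocalItem("population"), LocalItem("offsets_0")]
physical_schema = [PhysicalField("col_0"), PhysicalField("col_1"), PhysicalField("col_2")]

# Same positional zip as the dict comprehension in cache_remote_replacement above.
mapping = {local_items[i].column: physical_schema[i].name for i in range(len(local_items))}
assert mapping == {"city": "col_0", "population": "col_1", "offsets_0": "col_2"}
```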
@@ -118,6 +137,7 @@ def __init__(
         bqclient: bigquery.Client,
         storage_manager: bigframes.session.temporary_storage.TemporaryStorageManager,
         bqstoragereadclient: google.cloud.bigquery_storage_v1.BigQueryReadClient,
+        loader: loader.GbqDataLoader,
         *,
         strictly_ordered: bool = True,
         metrics: Optional[bigframes.session.metrics.ExecutionMetrics] = None,
@@ -127,6 +147,7 @@ def __init__(
         self.strictly_ordered: bool = strictly_ordered
         self.cache: ExecutionCache = ExecutionCache()
         self.metrics = metrics
+        self.loader = loader
         self.bqstoragereadclient = bqstoragereadclient
         # Simple left-to-right precedence for now
         self._semi_executors = (
@@ -136,6 +157,7 @@ def __init__(
             ),
             local_scan_executor.LocalScanExecutor(),
         )
+        self._upload_lock = threading.Lock()

     def to_sql(
         self,
@@ -147,6 +169,7 @@ def to_sql(
         if offset_column:
             array_value, _ = array_value.promote_offsets()
         node = self.logical_plan(array_value.node) if enable_cache else array_value.node
+        node = self._substitute_large_local_sources(node)
         compiled = compile.compile_sql(compile.CompileRequest(node, sort_rows=ordered))
         return compiled.sql

@@ -372,6 +395,7 @@ def _cache_with_cluster_cols(
     ):
         """Executes the query and uses the resulting table to rewrite future executions."""
         plan = self.logical_plan(array_value.node)
+        plan = self._substitute_large_local_sources(plan)
         compiled = compile.compile_sql(
             compile.CompileRequest(
                 plan, sort_rows=False, materialize_all_order_keys=True
@@ -392,7 +416,7 @@ def _cache_with_offsets(self, array_value: bigframes.core.ArrayValue):
         w_offsets, offset_column = array_value.promote_offsets()
         compiled = compile.compile_sql(
             compile.CompileRequest(
-                self.logical_plan(w_offsets.node),
+                self.logical_plan(self._substitute_large_local_sources(w_offsets.node)),
                 sort_rows=False,
             )
         )
@@ -502,6 +526,48 @@ def _validate_result_schema(
                 f"This error should only occur while testing. Ibis schema: {ibis_schema} does not match actual schema: {actual_schema}"
             )

+    def _substitute_large_local_sources(self, original_root: nodes.BigFrameNode):
+        """
+        Replace large local sources with the uploaded version of those datasources.
+        """
+        # Step 1: Upload all previously un-uploaded data
+        for leaf in original_root.unique_nodes():
+            if isinstance(leaf, nodes.ReadLocalNode):
+                if leaf.local_data_source.metadata.total_bytes > 5000:
+                    self._upload_local_data(leaf.local_data_source)
+
+        # Step 2: Replace local scans with remote scans
+        def map_local_scans(node: nodes.BigFrameNode):
+            if not isinstance(node, nodes.ReadLocalNode):
+                return node
+            if node.local_data_source not in self.cache._uploaded_local_data:
+                return node
+            bq_source, source_mapping = self.cache._uploaded_local_data[
+                node.local_data_source
+            ]
+            scan_list = node.scan_list.remap_source_ids(source_mapping)
+            if node.offsets_col is not None:
+                scan_list = scan_list.append(
+                    bq_source.table.physical_schema[-1].name,
+                    bigframes.dtypes.INT_DTYPE,
+                    node.offsets_col,
+                )
+            return nodes.ReadTableNode(bq_source, scan_list, node.session)
+
+        return original_root.bottom_up(map_local_scans)
+
+    def _upload_local_data(self, local_table: local_data.ManagedArrowTable):
+        if local_table in self.cache._uploaded_local_data:
+            return
+        # Lock prevents concurrent repeated work, but slows things down.
+        # Might be better as a queue and a worker thread
+        with self._upload_lock:
+            if local_table not in self.cache._uploaded_local_data:
+                uploaded = self.loader.load_data(
+                    local_table, bigframes.core.guid.generate_guid()
+                )
+                self.cache.cache_remote_replacement(local_table, uploaded)
+
     def _execute_plan(
         self,
         plan: nodes.BigFrameNode,

Review thread on the offsets_col handling in map_local_scans:

Review comment: What's the reason that offsets_col isn't in the scan_list?

Author reply: It's a bit clumsy because offsets_col is only really a thing for the local node. When we upload, we set the final physical column to be those offsets, but then we have to append it to the end of the scan_list for the ReadTableNode. Added a comment to explain. There is probably some room to re-spec the leaf nodes for a more 1:1 mapping.
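_upload_local_data uses a check-lock-check pattern so that concurrent executions never upload the same local table twice, while the fast path stays lock-free once a table is cached. A minimal sketch of the idiom, with upload_fn standing in for loader.load_data and a plain dict standing in for the weak-keyed cache:

```python
import threading


class DeferredUploader:
    """Minimal sketch of the check-lock-check idiom used above (names are illustrative)."""

    def __init__(self, upload_fn):
        self._upload_fn = upload_fn  # stands in for loader.load_data
        self._uploaded = {}          # table -> remote handle (weak-keyed in the real code)
        self._lock = threading.Lock()

    def ensure_uploaded(self, table):
        if table in self._uploaded:          # fast path: already uploaded, no lock needed
            return self._uploaded[table]
        with self._lock:                     # serialize the expensive upload
            if table not in self._uploaded:  # re-check: another thread may have won the race
                self._uploaded[table] = self._upload_fn(table)
        return self._uploaded[table]
```

As the in-code comment notes, a queue drained by a worker thread could avoid blocking callers entirely, at the cost of more machinery.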
@@ -532,6 +598,8 @@ def _execute_plan(
         # Use explicit destination to avoid 10GB limit of temporary table
         if destination_table is not None:
             job_config.destination = destination_table
+
+        plan = self._substitute_large_local_sources(plan)
         compiled = compile.compile_sql(
             compile.CompileRequest(plan, sort_rows=ordered, peek_count=peek)
         )
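Taken together, the change means large in-memory data is no longer uploaded eagerly; it is uploaded only when a plan that reads it is actually compiled or executed, and then at most once per table. A rough sketch of the user-visible flow this enables, assuming the read_pandas entry point and this PR's deferral behavior:

```python
import pandas as pd

import bigframes.pandas as bpd

pdf = pd.DataFrame({"x": range(100_000)})  # large enough to exceed the size threshold

df = bpd.read_pandas(pdf)      # with deferred uploading, no BigQuery load job runs yet

result = (df["x"] * 2).sum()   # execution substitutes the local source: the data is
                               # uploaded once, and the compiled SQL reads the uploaded table
print(result)
```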