"""Function definition of the artifact ``upload``."""
from __future__ import annotations
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from math import ceil
from pathlib import Path
from typing import TYPE_CHECKING
from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
from ..client.client import Client, HUBClient
from .serializer import Serializer
if TYPE_CHECKING:
from typing import Final
import httpx
from ..project.project import Project


# A pre-configured ``rich`` progress bar used to render upload progress.
SkinnedProgress = partial(
    Progress,
    TextColumn("[bold cyan blink]Uploading..."),
    BarColumn(
        complete_style="dark_orange",
        finished_style="dark_orange",
        pulse_style="orange1",
    ),
    TextColumn("[orange1]{task.percentage:>3.0f}%"),
    TimeElapsedColumn(),
    transient=True,
)
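
# A minimal usage sketch (illustrative, not part of the original module): any
# finite iterable can be tracked, and ``transient=True`` clears the bar from
# the terminal on exit.
#
#     with SkinnedProgress() as progress:
#         for _ in progress.track(range(3), total=3):
#             ...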


def upload_chunk(
    filepath: Path,
    client: httpx.Client,
    url: str,
    offset: int,
    length: int,
    content_type: str,
) -> str:
"""
Upload a chunk of the serialized content to the artifacts storage.
Parameters
----------
filepath : ``Path``
The path of the file containing the serialized content.
client : ``httpx.Client``
The client used to upload the chunk to the artifacts storage.
url : str
The url used to upload the chunk to the artifacts storage.
offset : int
The start of the chunk in the file containing the serialized content.
length: int
The length of the chunk in the file containing the serialized content.
content_type: strategy
The type of the content to upload.
Returns
-------
etag : str
The ETag assigned by the artifacts storage to the chunk, used to acknowledge the
upload.
Notes
-----
This function is in charge of reading its own chunk to reduce RAM footprint.
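
    Examples
    --------
    A minimal sketch, not a runnable recipe: the URL and filename below are
    hypothetical, since a real call needs a presigned upload URL issued by the
    hub.

    >>> import httpx
    >>> from pathlib import Path
    >>> with httpx.Client() as client:  # doctest: +SKIP
    ...     etag = upload_chunk(
    ...         filepath=Path("serialized.bin"),
    ...         client=client,
    ...         url="https://storage.example.com/part-1",
    ...         offset=0,
    ...         length=CHUNK_SIZE,
    ...         content_type="application/octet-stream",
    ...     )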
"""
with filepath.open("rb") as file:
file.seek(offset)
response = client.put(
url=url,
content=file.read(length),
headers={"Content-Type": content_type},
timeout=30,
)
return response.headers["etag"]


# This is both the threshold at which content is split into several smaller
# parts for upload, and the size of those parts.
CHUNK_SIZE: Final[int] = int(1e7)  # 10 MB
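#
# For example (a worked check, not part of the original code): a 25 MB payload
# is announced as ceil(25_000_000 / CHUNK_SIZE) == 3 chunks; the first two are
# full 10 MB reads, and the last `file.read(length)` returns the remaining
# 5 MB, since `read` stops at end-of-file.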


def upload(project: Project, content: str | bytes, content_type: str) -> str:
    """
    Upload content to the artifacts storage.

    Parameters
    ----------
    project : ``Project``
        The project to upload the content to.
    content : str | bytes
        The content to upload.
    content_type : str
        The type of the content to upload.

    Returns
    -------
    checksum : str
        The checksum of the content before upload to the artifacts storage,
        based on its serialization.

    Notes
    -----
    Content that was already uploaded in its entirety is ignored.
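
    Examples
    --------
    A minimal sketch, assuming ``project`` is an existing ``Project`` instance;
    the call needs hub credentials, so it is skipped at doctest time.

    >>> checksum = upload(  # doctest: +SKIP
    ...     project=project,
    ...     content=b"hello world",
    ...     content_type="application/octet-stream",
    ... )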
"""
with (
Serializer(content) as serializer,
HUBClient() as hub_client,
Client() as standard_client,
ThreadPoolExecutor() as pool,
):
# Ask for upload urls.
response = hub_client.post(
url=f"projects/{project.workspace}/{project.name}/artifacts",
json=[
{
"checksum": serializer.checksum,
"chunk_number": ceil(serializer.size / CHUNK_SIZE),
"content_type": content_type,
}
],
)
# An empty response means that an artifact with the same checksum already
# exists. The content doesn't have to be re-uploaded.
if urls := response.json():
task_to_chunk_id = {}
# Upload each chunk of the serialized content to the artifacts storage,
# using a disk temporary file.
#
# Each task is in charge of reading its own file chunk at runtime, to reduce
# RAM footprint.
#
# Use `threading` over `asyncio` to ensure compatibility with Jupyter
# notebooks, where the event loop is already running.
for url in urls:
chunk_id = url["chunk_id"] or 1
task = pool.submit(
upload_chunk,
filepath=serializer.filepath,
client=standard_client,
url=url["upload_url"],
offset=((chunk_id - 1) * CHUNK_SIZE),
length=CHUNK_SIZE,
content_type=(
content_type if len(urls) == 1 else "application/octet-stream"
),
)
task_to_chunk_id[task] = chunk_id
try:
with SkinnedProgress() as progress:
tasks = as_completed(task_to_chunk_id)
total = len(task_to_chunk_id)
etags = dict(
sorted(
(
task_to_chunk_id[task],
task.result(),
)
for task in progress.track(tasks, total=total)
)
)
except BaseException:
# Cancel all remaining tasks, especially on `KeyboardInterrupt`.
for task in task_to_chunk_id:
task.cancel()
raise
# Acknowledge the upload, to let the hub/storage rebuild the whole.
hub_client.post(
url=f"projects/{project.workspace}/{project.name}/artifacts/complete",
json=[
{
"checksum": serializer.checksum,
"etags": etags,
}
],
)
return serializer.checksum