Skip to content

Commit c073b65

Browse files
andrewferlitsch authored and leahecole committed
chore(samples): drop obsolete samples (#210)
Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-videointelligence/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea - [ ] Ensure the tests and linter pass - [ ] Code coverage does not decrease (if any source code was changed) - [ ] Appropriate docs were updated (if necessary) Fixes #178 🦕
1 parent aa5fdf7 commit c073b65

File tree

2 files changed

+0
-165
lines changed

2 files changed

+0
-165
lines changed

videointelligence/samples/analyze/beta_snippets.py

Lines changed: 0 additions & 140 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,6 @@
2424
python beta_snippets.py video-text-gcs \
2525
gs://python-docs-samples-tests/video/googlework_tiny.mp4
2626
27-
python beta_snippets.py track-objects resources/cat.mp4
28-
2927
python beta_snippets.py streaming-labels resources/cat.mp4
3028
3129
python beta_snippets.py streaming-shot-change resources/cat.mp4
@@ -212,130 +210,6 @@ def video_detect_text(path):
212210
return annotation_result.text_annotations
213211

214212

215-
def track_objects_gcs(gcs_uri):
216-
# [START video_object_tracking_gcs_beta]
217-
"""Object Tracking."""
218-
from google.cloud import videointelligence_v1p2beta1 as videointelligence
219-
220-
# It is recommended to use location_id as 'us-east1' for the best latency
221-
# due to different types of processors used in this region and others.
222-
video_client = videointelligence.VideoIntelligenceServiceClient()
223-
features = [videointelligence.Feature.OBJECT_TRACKING]
224-
operation = video_client.annotate_video(
225-
request={
226-
"features": features,
227-
"input_uri": gcs_uri,
228-
"location_id": "us-east1",
229-
}
230-
)
231-
print("\nProcessing video for object annotations.")
232-
233-
result = operation.result(timeout=500)
234-
print("\nFinished processing.\n")
235-
236-
# The first result is retrieved because a single video was processed.
237-
object_annotations = result.annotation_results[0].object_annotations
238-
239-
# Get only the first annotation for demo purposes.
240-
object_annotation = object_annotations[0]
241-
# description is in Unicode
242-
print(u"Entity description: {}".format(object_annotation.entity.description))
243-
if object_annotation.entity.entity_id:
244-
print("Entity id: {}".format(object_annotation.entity.entity_id))
245-
246-
print(
247-
"Segment: {}s to {}s".format(
248-
object_annotation.segment.start_time_offset.seconds
249-
+ object_annotation.segment.start_time_offset.microseconds / 1e6,
250-
object_annotation.segment.end_time_offset.seconds
251-
+ object_annotation.segment.end_time_offset.microseconds / 1e6,
252-
)
253-
)
254-
255-
print("Confidence: {}".format(object_annotation.confidence))
256-
257-
# Here we print only the bounding box of the first frame in this segment
258-
frame = object_annotation.frames[0]
259-
box = frame.normalized_bounding_box
260-
print(
261-
"Time offset of the first frame: {}s".format(
262-
frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
263-
)
264-
)
265-
print("Bounding box position:")
266-
print("\tleft : {}".format(box.left))
267-
print("\ttop : {}".format(box.top))
268-
print("\tright : {}".format(box.right))
269-
print("\tbottom: {}".format(box.bottom))
270-
print("\n")
271-
# [END video_object_tracking_gcs_beta]
272-
return object_annotations
273-
274-
275-
def track_objects(path):
276-
# [START video_object_tracking_beta]
277-
"""Object Tracking."""
278-
from google.cloud import videointelligence_v1p2beta1 as videointelligence
279-
280-
video_client = videointelligence.VideoIntelligenceServiceClient()
281-
features = [videointelligence.Feature.OBJECT_TRACKING]
282-
283-
with io.open(path, "rb") as file:
284-
input_content = file.read()
285-
286-
# It is recommended to use location_id as 'us-east1' for the best latency
287-
# due to different types of processors used in this region and others.
288-
operation = video_client.annotate_video(
289-
request={
290-
"features": features,
291-
"input_content": input_content,
292-
"location_id": "us-east1",
293-
}
294-
)
295-
print("\nProcessing video for object annotations.")
296-
297-
result = operation.result(timeout=500)
298-
print("\nFinished processing.\n")
299-
300-
# The first result is retrieved because a single video was processed.
301-
object_annotations = result.annotation_results[0].object_annotations
302-
303-
# Get only the first annotation for demo purposes.
304-
object_annotation = object_annotations[0]
305-
# description is in Unicode
306-
print(u"Entity description: {}".format(object_annotation.entity.description))
307-
if object_annotation.entity.entity_id:
308-
print("Entity id: {}".format(object_annotation.entity.entity_id))
309-
310-
print(
311-
"Segment: {}s to {}s".format(
312-
object_annotation.segment.start_time_offset.seconds
313-
+ object_annotation.segment.start_time_offset.microseconds / 1e6,
314-
object_annotation.segment.end_time_offset.seconds
315-
+ object_annotation.segment.end_time_offset.microseconds / 1e6,
316-
)
317-
)
318-
319-
print("Confidence: {}".format(object_annotation.confidence))
320-
321-
# Here we print only the bounding box of the first frame in this segment
322-
frame = object_annotation.frames[0]
323-
box = frame.normalized_bounding_box
324-
print(
325-
"Time offset of the first frame: {}s".format(
326-
frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
327-
)
328-
)
329-
print("Bounding box position:")
330-
print("\tleft : {}".format(box.left))
331-
print("\ttop : {}".format(box.top))
332-
print("\tright : {}".format(box.right))
333-
print("\tbottom: {}".format(box.bottom))
334-
print("\n")
335-
# [END video_object_tracking_beta]
336-
return object_annotations
337-
338-
339213
def detect_labels_streaming(path):
340214
# [START video_streaming_label_detection_beta]
341215
from google.cloud import videointelligence_v1p3beta1 as videointelligence
@@ -890,16 +764,6 @@ def stream_generator():
890764
)
891765
video_text_parser.add_argument("path")
892766

893-
video_object_tracking_gcs_parser = subparsers.add_parser(
894-
"track-objects-gcs", help=track_objects_gcs.__doc__
895-
)
896-
video_object_tracking_gcs_parser.add_argument("gcs_uri")
897-
898-
video_object_tracking_parser = subparsers.add_parser(
899-
"track-objects", help=track_objects.__doc__
900-
)
901-
video_object_tracking_parser.add_argument("path")
902-
903767
video_streaming_labels_parser = subparsers.add_parser(
904768
"streaming-labels", help=detect_labels_streaming.__doc__
905769
)
@@ -948,10 +812,6 @@ def stream_generator():
948812
video_detect_text_gcs(args.gcs_uri)
949813
elif args.command == "video-text":
950814
video_detect_text(args.path)
951-
elif args.command == "track-objects-gcs":
952-
track_objects_gcs(args.gcs_uri)
953-
elif args.command == "track-objects":
954-
track_objects(args.path)
955815
elif args.command == "streaming-labels":
956816
detect_labels_streaming(args.path)
957817
elif args.command == "streaming-shot-change":

videointelligence/samples/analyze/beta_snippets_test.py

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -143,31 +143,6 @@ def test_detect_text_gcs(capsys):
143143
assert "Text" in out
144144

145145

146-
# Flaky InvalidArgument
147-
@pytest.mark.flaky(max_runs=3, min_passes=1)
148-
def test_track_objects(capsys):
149-
in_file = "./resources/googlework_tiny.mp4"
150-
beta_snippets.track_objects(in_file)
151-
out, _ = capsys.readouterr()
152-
assert "Entity id" in out
153-
154-
155-
# Flaky exceeding designed timeout
156-
@pytest.mark.slow
157-
@pytest.mark.flaky(max_runs=3, min_passes=1)
158-
def test_track_objects_gcs():
159-
in_file = "gs://cloud-samples-data/video/cat.mp4"
160-
object_annotations = beta_snippets.track_objects_gcs(in_file)
161-
162-
text_exists = False
163-
for object_annotation in object_annotations:
164-
if "CAT" in object_annotation.entity.description.upper():
165-
text_exists = True
166-
assert text_exists
167-
assert object_annotations[0].frames[0].normalized_bounding_box.left >= 0.0
168-
assert object_annotations[0].frames[0].normalized_bounding_box.left <= 1.0
169-
170-
171146
# Flaky Gateway
172147
@pytest.mark.flaky(max_runs=3, min_passes=1)
173148
def test_streaming_automl_classification(capsys, video_path):

0 commit comments

Comments (0)