@@ -24,8 +24,6 @@
     python beta_snippets.py video-text-gcs \
     gs://python-docs-samples-tests/video/googlework_tiny.mp4
 
-    python beta_snippets.py track-objects resources/cat.mp4
-
     python beta_snippets.py streaming-labels resources/cat.mp4
 
     python beta_snippets.py streaming-shot-change resources/cat.mp4
@@ -212,130 +210,6 @@ def video_detect_text(path):
     return annotation_result.text_annotations
 
 
-def track_objects_gcs(gcs_uri):
-    # [START video_object_tracking_gcs_beta]
-    """Object Tracking."""
-    from google.cloud import videointelligence_v1p2beta1 as videointelligence
-
-    # It is recommended to use location_id as 'us-east1' for the best latency
-    # due to different types of processors used in this region and others.
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.Feature.OBJECT_TRACKING]
-    operation = video_client.annotate_video(
-        request={
-            "features": features,
-            "input_uri": gcs_uri,
-            "location_id": "us-east1",
-        }
-    )
-    print("\nProcessing video for object annotations.")
-
-    result = operation.result(timeout=500)
-    print("\nFinished processing.\n")
-
-    # The first result is retrieved because a single video was processed.
-    object_annotations = result.annotation_results[0].object_annotations
-
-    # Get only the first annotation for demo purposes.
-    object_annotation = object_annotations[0]
-    # description is in Unicode
-    print(u"Entity description: {}".format(object_annotation.entity.description))
-    if object_annotation.entity.entity_id:
-        print("Entity id: {}".format(object_annotation.entity.entity_id))
-
-    print(
-        "Segment: {}s to {}s".format(
-            object_annotation.segment.start_time_offset.seconds
-            + object_annotation.segment.start_time_offset.microseconds / 1e6,
-            object_annotation.segment.end_time_offset.seconds
-            + object_annotation.segment.end_time_offset.microseconds / 1e6,
-        )
-    )
-
-    print("Confidence: {}".format(object_annotation.confidence))
-
-    # Here we print only the bounding box of the first frame in this segment
-    frame = object_annotation.frames[0]
-    box = frame.normalized_bounding_box
-    print(
-        "Time offset of the first frame: {}s".format(
-            frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
-        )
-    )
-    print("Bounding box position:")
-    print("\tleft  : {}".format(box.left))
-    print("\ttop   : {}".format(box.top))
-    print("\tright : {}".format(box.right))
-    print("\tbottom: {}".format(box.bottom))
-    print("\n")
-    # [END video_object_tracking_gcs_beta]
-    return object_annotations
-
-
-def track_objects(path):
-    # [START video_object_tracking_beta]
-    """Object Tracking."""
-    from google.cloud import videointelligence_v1p2beta1 as videointelligence
-
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.Feature.OBJECT_TRACKING]
-
-    with io.open(path, "rb") as file:
-        input_content = file.read()
-
-    # It is recommended to use location_id as 'us-east1' for the best latency
-    # due to different types of processors used in this region and others.
-    operation = video_client.annotate_video(
-        request={
-            "features": features,
-            "input_content": input_content,
-            "location_id": "us-east1",
-        }
-    )
-    print("\nProcessing video for object annotations.")
-
-    result = operation.result(timeout=500)
-    print("\nFinished processing.\n")
-
-    # The first result is retrieved because a single video was processed.
-    object_annotations = result.annotation_results[0].object_annotations
-
-    # Get only the first annotation for demo purposes.
-    object_annotation = object_annotations[0]
-    # description is in Unicode
-    print(u"Entity description: {}".format(object_annotation.entity.description))
-    if object_annotation.entity.entity_id:
-        print("Entity id: {}".format(object_annotation.entity.entity_id))
-
-    print(
-        "Segment: {}s to {}s".format(
-            object_annotation.segment.start_time_offset.seconds
-            + object_annotation.segment.start_time_offset.microseconds / 1e6,
-            object_annotation.segment.end_time_offset.seconds
-            + object_annotation.segment.end_time_offset.microseconds / 1e6,
-        )
-    )
-
-    print("Confidence: {}".format(object_annotation.confidence))
-
-    # Here we print only the bounding box of the first frame in this segment
-    frame = object_annotation.frames[0]
-    box = frame.normalized_bounding_box
-    print(
-        "Time offset of the first frame: {}s".format(
-            frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
-        )
-    )
-    print("Bounding box position:")
-    print("\tleft  : {}".format(box.left))
-    print("\ttop   : {}".format(box.top))
-    print("\tright : {}".format(box.right))
-    print("\tbottom: {}".format(box.bottom))
-    print("\n")
-    # [END video_object_tracking_beta]
-    return object_annotations
-
-
 def detect_labels_streaming(path):
     # [START video_streaming_label_detection_beta]
     from google.cloud import videointelligence_v1p3beta1 as videointelligence
@@ -890,16 +764,6 @@ def stream_generator():
     )
     video_text_parser.add_argument("path")
 
-    video_object_tracking_gcs_parser = subparsers.add_parser(
-        "track-objects-gcs", help=track_objects_gcs.__doc__
-    )
-    video_object_tracking_gcs_parser.add_argument("gcs_uri")
-
-    video_object_tracking_parser = subparsers.add_parser(
-        "track-objects", help=track_objects.__doc__
-    )
-    video_object_tracking_parser.add_argument("path")
-
     video_streaming_labels_parser = subparsers.add_parser(
         "streaming-labels", help=detect_labels_streaming.__doc__
     )
@@ -948,10 +812,6 @@ def stream_generator():
         video_detect_text_gcs(args.gcs_uri)
     elif args.command == "video-text":
         video_detect_text(args.path)
-    elif args.command == "track-objects-gcs":
-        track_objects_gcs(args.gcs_uri)
-    elif args.command == "track-objects":
-        track_objects(args.path)
     elif args.command == "streaming-labels":
         detect_labels_streaming(args.path)
     elif args.command == "streaming-shot-change":
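Note: the deleted samples targeted the `videointelligence_v1p2beta1` client. Object tracking is also available in the stable v1 surface of the Video Intelligence API, so the same flow can be reproduced against the GA `google.cloud.videointelligence` client. A minimal sketch under that assumption (the `track_objects_v1` name and the GCS URI are illustrative, not part of this repo):

```python
# Minimal sketch: object tracking via the GA v1 client, standing in for
# the removed v1p2beta1 beta sample. Assumes google-cloud-videointelligence
# is installed and credentials are configured.
from google.cloud import videointelligence


def track_objects_v1(gcs_uri):
    """Track objects in a GCS-hosted video using the stable v1 API."""
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.Feature.OBJECT_TRACKING]

    operation = video_client.annotate_video(
        request={"features": features, "input_uri": gcs_uri}
    )
    # annotate_video is a long-running operation; block until it completes.
    result = operation.result(timeout=500)

    # A single video was submitted, so read the first annotation result.
    for annotation in result.annotation_results[0].object_annotations:
        print("Entity: {}".format(annotation.entity.description))
        print("Confidence: {}".format(annotation.confidence))


if __name__ == "__main__":
    # Illustrative sample video; substitute your own gs:// URI.
    track_objects_v1("gs://cloud-samples-data/video/cat.mp4")
```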