
Commit 4e4b0ab

averikitsch authored and lesv committed
samples: update shared config (#2443)
* update shared config
* Update to 1.0.13
* lint
* Fix linting
* lint
* fix imports

Co-authored-by: Les Vogel <[email protected]>
1 parent d442bfd commit 4e4b0ab

28 files changed: +301 −316 lines

video/src/main/java/com/example/video/Detect.java
Lines changed: 23 additions & 23 deletions

@@ -33,8 +33,8 @@
 
 public class Detect {
   /**
-   * Detects video transcription using the Video Intelligence
-   * API
+   * Detects video transcription using the Video Intelligence API
+   *
    * @param args specifies features to detect and the path to the video on Google Cloud Storage.
    */
   public static void main(String[] args) {
@@ -48,8 +48,8 @@ public static void main(String[] args) {
 
   /**
    * Helper that handles the input passed to the program.
-   * @param args specifies features to detect and the path to the video on Google Cloud Storage.
    *
+   * @param args specifies features to detect and the path to the video on Google Cloud Storage.
    * @throws IOException on Input/Output errors.
    */
   public static void argsHelper(String[] args) throws Exception {
@@ -82,31 +82,31 @@ public static void speechTranscription(String gcsUri) throws Exception {
     // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
     try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
       // Set the language code
-      SpeechTranscriptionConfig config = SpeechTranscriptionConfig.newBuilder()
-          .setLanguageCode("en-US")
-          .setEnableAutomaticPunctuation(true)
-          .build();
+      SpeechTranscriptionConfig config =
+          SpeechTranscriptionConfig.newBuilder()
+              .setLanguageCode("en-US")
+              .setEnableAutomaticPunctuation(true)
+              .build();
 
       // Set the video context with the above configuration
-      VideoContext context = VideoContext.newBuilder()
-          .setSpeechTranscriptionConfig(config)
-          .build();
+      VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
 
       // Create the request
-      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
-          .setInputUri(gcsUri)
-          .addFeatures(Feature.SPEECH_TRANSCRIPTION)
-          .setVideoContext(context)
-          .build();
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.SPEECH_TRANSCRIPTION)
+              .setVideoContext(context)
+              .build();
 
       // asynchronously perform speech transcription on videos
       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
           client.annotateVideoAsync(request);
 
       System.out.println("Waiting for operation to complete...");
       // Display the results
-      for (VideoAnnotationResults results : response.get(300, TimeUnit.SECONDS)
-          .getAnnotationResultsList()) {
+      for (VideoAnnotationResults results :
+          response.get(300, TimeUnit.SECONDS).getAnnotationResultsList()) {
         for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
           try {
             // Print the transcription
@@ -118,12 +118,12 @@ public static void speechTranscription(String gcsUri) throws Exception {
 
             System.out.println("Word level information:");
             for (WordInfo wordInfo : alternative.getWordsList()) {
-              double startTime = wordInfo.getStartTime().getSeconds()
-                  + wordInfo.getStartTime().getNanos() / 1e9;
-              double endTime = wordInfo.getEndTime().getSeconds()
-                  + wordInfo.getEndTime().getNanos() / 1e9;
-              System.out.printf("\t%4.2fs - %4.2fs: %s\n",
-                  startTime, endTime, wordInfo.getWord());
+              double startTime =
+                  wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
+              double endTime =
+                  wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
+              System.out.printf(
+                  "\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
             }
           } else {
             System.out.println("No transcription found");

video/src/main/java/com/example/video/DetectFaces.java
Lines changed: 20 additions & 24 deletions

@@ -33,12 +33,9 @@
 import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
 import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
 import com.google.protobuf.ByteString;
-
-import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.util.concurrent.ExecutionException;
 
 public class DetectFaces {
 
@@ -51,31 +48,31 @@ public static void detectFaces() throws Exception {
   // Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
   public static void detectFaces(String localFilePath) throws Exception {
     try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
-             VideoIntelligenceServiceClient.create()) {
+        VideoIntelligenceServiceClient.create()) {
       // Reads a local video file and converts it to base64.
       Path path = Paths.get(localFilePath);
       byte[] data = Files.readAllBytes(path);
       ByteString inputContent = ByteString.copyFrom(data);
 
       FaceDetectionConfig faceDetectionConfig =
-          FaceDetectionConfig.newBuilder()
-          // Must set includeBoundingBoxes to true to get facial attributes.
-          .setIncludeBoundingBoxes(true)
-          .setIncludeAttributes(true)
-          .build();
+          FaceDetectionConfig.newBuilder()
+              // Must set includeBoundingBoxes to true to get facial attributes.
+              .setIncludeBoundingBoxes(true)
+              .setIncludeAttributes(true)
+              .build();
       VideoContext videoContext =
-              VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
+          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
 
       AnnotateVideoRequest request =
-          AnnotateVideoRequest.newBuilder()
-          .setInputContent(inputContent)
-          .addFeatures(Feature.FACE_DETECTION)
-          .setVideoContext(videoContext)
-          .build();
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(inputContent)
+              .addFeatures(Feature.FACE_DETECTION)
+              .setVideoContext(videoContext)
+              .build();
 
       // Detects faces in a video
       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
-              videoIntelligenceServiceClient.annotateVideoAsync(request);
+          videoIntelligenceServiceClient.annotateVideoAsync(request);
 
       System.out.println("Waiting for operation to complete...");
       AnnotateVideoResponse response = future.get();
@@ -85,18 +82,17 @@ public static void detectFaces(String localFilePath) throws Exception {
 
       // Annotations for list of faces detected, tracked and recognized in video.
       for (FaceDetectionAnnotation faceDetectionAnnotation :
-               annotationResult.getFaceDetectionAnnotationsList()) {
+          annotationResult.getFaceDetectionAnnotationsList()) {
         System.out.print("Face detected:\n");
         for (Track track : faceDetectionAnnotation.getTracksList()) {
           VideoSegment segment = track.getSegment();
           System.out.printf(
-            "\tStart: %d.%.0fs\n",
-            segment.getStartTimeOffset().getSeconds(),
-            segment.getStartTimeOffset().getNanos() / 1e6);
+              "\tStart: %d.%.0fs\n",
+              segment.getStartTimeOffset().getSeconds(),
+              segment.getStartTimeOffset().getNanos() / 1e6);
           System.out.printf(
-            "\tEnd: %d.%.0fs\n",
-            segment.getEndTimeOffset().getSeconds(),
-            segment.getEndTimeOffset().getNanos() / 1e6);
+              "\tEnd: %d.%.0fs\n",
+              segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
 
           // Each segment includes timestamped objects that
           // include characteristics of the face detected.
@@ -111,4 +107,4 @@ public static void detectFaces(String localFilePath) throws Exception {
       }
     }
   }
-  // [END video_detect_faces_beta]
+// [END video_detect_faces_beta]
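Note: all of these samples follow the same long-running-operation pattern: open a client in try-with-resources, build a request, call annotateVideoAsync, and block on the returned OperationFuture. A condensed sketch of just that skeleton, using the same v1p3beta1 classes as the diff (the class name and input URI are placeholders, not from the commit):

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.Feature;
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;

public class AnnotateSketch {
  public static void main(String[] args) throws Exception {
    // try-with-resources closes the client's channels when done.
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri("gs://your-bucket/your-video.mp4") // placeholder URI
              .addFeatures(Feature.FACE_DETECTION)
              .build();
      // The annotation runs server-side as a long-running operation; get() blocks
      // until it completes and returns the final response.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          client.annotateVideoAsync(request);
      AnnotateVideoResponse response = future.get();
      System.out.println("Results: " + response.getAnnotationResultsCount());
    }
  }
}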

video/src/main/java/com/example/video/DetectFacesGcs.java
Lines changed: 19 additions & 20 deletions

@@ -44,27 +44,27 @@ public static void detectFacesGcs() throws Exception {
   // Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
   public static void detectFacesGcs(String gcsUri) throws Exception {
     try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
-             VideoIntelligenceServiceClient.create()) {
+        VideoIntelligenceServiceClient.create()) {
 
       FaceDetectionConfig faceDetectionConfig =
-          FaceDetectionConfig.newBuilder()
-          // Must set includeBoundingBoxes to true to get facial attributes.
-          .setIncludeBoundingBoxes(true)
-          .setIncludeAttributes(true)
-          .build();
+          FaceDetectionConfig.newBuilder()
+              // Must set includeBoundingBoxes to true to get facial attributes.
+              .setIncludeBoundingBoxes(true)
+              .setIncludeAttributes(true)
+              .build();
       VideoContext videoContext =
-              VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
+          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
 
       AnnotateVideoRequest request =
-          AnnotateVideoRequest.newBuilder()
-          .setInputUri(gcsUri)
-          .addFeatures(Feature.FACE_DETECTION)
-          .setVideoContext(videoContext)
-          .build();
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.FACE_DETECTION)
+              .setVideoContext(videoContext)
+              .build();
 
       // Detects faces in a video
       OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
-              videoIntelligenceServiceClient.annotateVideoAsync(request);
+          videoIntelligenceServiceClient.annotateVideoAsync(request);
 
       System.out.println("Waiting for operation to complete...");
       AnnotateVideoResponse response = future.get();
@@ -74,18 +74,17 @@ public static void detectFacesGcs(String gcsUri) throws Exception {
 
      // Annotations for list of people detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
-               annotationResult.getFaceDetectionAnnotationsList()) {
+          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
-            "\tStart: %d.%.0fs\n",
-            segment.getStartTimeOffset().getSeconds(),
-            segment.getStartTimeOffset().getNanos() / 1e6);
+              "\tStart: %d.%.0fs\n",
+              segment.getStartTimeOffset().getSeconds(),
+              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
-            "\tEnd: %d.%.0fs\n",
-            segment.getEndTimeOffset().getSeconds(),
-            segment.getEndTimeOffset().getNanos() / 1e6);
+              "\tEnd: %d.%.0fs\n",
+              segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
 
          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
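Note: the segment printouts above format a protobuf Duration as whole seconds plus a millisecond fraction: getNanos() / 1e6 scales nanoseconds to milliseconds, which %.0f then prints after the dot. A small illustrative sketch of that same pattern (class name and values are hypothetical):

import com.google.protobuf.Duration;

public class OffsetFormat {
  public static void main(String[] args) {
    // 12 seconds and 340,000,000 nanoseconds = 12.340s
    Duration offset = Duration.newBuilder().setSeconds(12).setNanos(340_000_000).build();
    // Same printf pattern as the samples: seconds, then nanos scaled to milliseconds.
    System.out.printf("\tStart: %d.%.0fs\n", offset.getSeconds(), offset.getNanos() / 1e6);
    // Prints: Start: 12.340s
  }
}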

video/src/main/java/com/example/video/DetectLogo.java
Lines changed: 0 additions & 2 deletions

@@ -31,12 +31,10 @@
 import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Duration;
-
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-
 import java.util.concurrent.ExecutionException;
 
 public class DetectLogo {

video/src/main/java/com/example/video/DetectLogoGcs.java
Lines changed: 0 additions & 2 deletions

@@ -30,9 +30,7 @@
 import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
 import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
 import com.google.protobuf.Duration;
-
 import java.io.IOException;
-
 import java.util.concurrent.ExecutionException;
 
 public class DetectLogoGcs {

video/src/main/java/com/example/video/DetectPerson.java
Lines changed: 24 additions & 27 deletions

@@ -34,7 +34,6 @@
 import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
 import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
 import com.google.protobuf.ByteString;
-
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -47,37 +46,36 @@ public static void detectPerson() throws Exception {
     detectPerson(localFilePath);
   }
 
-
   // Detects people in a video stored in a local file using the Cloud Video Intelligence API.
   public static void detectPerson(String localFilePath) throws Exception {
     try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
-             VideoIntelligenceServiceClient.create()) {
+        VideoIntelligenceServiceClient.create()) {
      // Reads a local video file and converts it to base64.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);
 
      PersonDetectionConfig personDetectionConfig =
-          PersonDetectionConfig.newBuilder()
-          // Must set includeBoundingBoxes to true to get poses and attributes.
-          .setIncludeBoundingBoxes(true)
-          .setIncludePoseLandmarks(true)
-          .setIncludeAttributes(true)
-          .build();
+          PersonDetectionConfig.newBuilder()
+              // Must set includeBoundingBoxes to true to get poses and attributes.
+              .setIncludeBoundingBoxes(true)
+              .setIncludePoseLandmarks(true)
+              .setIncludeAttributes(true)
+              .build();
      VideoContext videoContext =
-              VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
+          VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
 
      AnnotateVideoRequest request =
-          AnnotateVideoRequest.newBuilder()
-          .setInputContent(inputContent)
-          .addFeatures(Feature.PERSON_DETECTION)
-          .setVideoContext(videoContext)
-          .build();
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(inputContent)
+              .addFeatures(Feature.PERSON_DETECTION)
+              .setVideoContext(videoContext)
+              .build();
 
      // Detects people in a video
      // We get the first result because only one video is processed.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
-              videoIntelligenceServiceClient.annotateVideoAsync(request);
+          videoIntelligenceServiceClient.annotateVideoAsync(request);
 
      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();
@@ -87,18 +85,17 @@ public static void detectPerson(String localFilePath) throws Exception {
 
      // Annotations for list of people detected, tracked and recognized in video.
      for (PersonDetectionAnnotation personDetectionAnnotation :
-               annotationResult.getPersonDetectionAnnotationsList()) {
+          annotationResult.getPersonDetectionAnnotationsList()) {
        System.out.print("Person detected:\n");
        for (Track track : personDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
-            "\tStart: %d.%.0fs\n",
-            segment.getStartTimeOffset().getSeconds(),
-            segment.getStartTimeOffset().getNanos() / 1e6);
+              "\tStart: %d.%.0fs\n",
+              segment.getStartTimeOffset().getSeconds(),
+              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
-            "\tEnd: %d.%.0fs\n",
-            segment.getEndTimeOffset().getSeconds(),
-            segment.getEndTimeOffset().getNanos() / 1e6);
+              "\tEnd: %d.%.0fs\n",
+              segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
 
          // Each segment includes timestamped objects that include characteristic--e.g. clothes,
          // posture of the person detected.
@@ -107,18 +104,18 @@ public static void detectPerson(String localFilePath) throws Exception {
          // Attributes include unique pieces of clothing, poses, or hair color.
          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            System.out.printf(
-              "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
+                "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
          }
 
          // Landmarks in person detection include body parts.
          for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
            System.out.printf(
-              "\tLandmark: %s; Vertex: %f, %f\n",
-              attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
+                "\tLandmark: %s; Vertex: %f, %f\n",
+                attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
          }
        }
      }
    }
  }
-  // [END video_detect_person_beta]
+// [END video_detect_person_beta]
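Note: DetectPerson, like DetectFaces, sends the raw video bytes inline via setInputContent rather than referencing a GCS URI. A minimal sketch of just that loading step, isolated from the sample (the class name and file path are illustrative):

import com.google.protobuf.ByteString;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class LoadVideoBytes {
  public static void main(String[] args) throws IOException {
    // Read the local video and wrap the bytes for AnnotateVideoRequest.setInputContent.
    byte[] data = Files.readAllBytes(Paths.get("resources/googlework_short.mp4"));
    ByteString inputContent = ByteString.copyFrom(data);
    System.out.println("Loaded " + inputContent.size() + " bytes");
  }
}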

0 commit comments