-
Notifications
You must be signed in to change notification settings - Fork 2.9k
feat(genai): add live samples (2) #10208
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open
jdomingr
wants to merge
4
commits into
GoogleCloudPlatform:main
Choose a base branch
from
jdomingr:genai-sdk-live-samples-2
base: main
Could not load branches
Branch not found.
Loading
Could not load tags
Nothing to show
Loading
Are you sure you want to change the base?
Some commits from the old base branch may be removed from the timeline,
and old review comments may become outdated.
Open
Changes from all commits
Commits
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Binary file not shown.
170 changes: 170 additions & 0 deletions
170
genai/snippets/src/main/java/genai/live/LiveAudioWithTxt.java
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,170 @@ | ||
| /* | ||
| * Copyright 2025 Google LLC | ||
| * | ||
| * Licensed under the Apache License, Version 2.0 (the "License"); | ||
| * you may not use this file except in compliance with the License. | ||
| * You may obtain a copy of the License at | ||
| * | ||
| * http://www.apache.org/licenses/LICENSE-2.0 | ||
| * | ||
| * Unless required by applicable law or agreed to in writing, software | ||
| * distributed under the License is distributed on an "AS IS" BASIS, | ||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| * See the License for the specific language governing permissions and | ||
| * limitations under the License. | ||
| */ | ||
|
|
||
| package genai.live; | ||
|
|
||
| // [START googlegenaisdk_live_audio_with_txt] | ||
|
|
||
| import static com.google.genai.types.Modality.Known.AUDIO; | ||
|
|
||
| import com.google.genai.AsyncSession; | ||
| import com.google.genai.Client; | ||
| import com.google.genai.types.Blob; | ||
| import com.google.genai.types.Content; | ||
| import com.google.genai.types.LiveConnectConfig; | ||
| import com.google.genai.types.LiveSendClientContentParameters; | ||
| import com.google.genai.types.LiveServerContent; | ||
| import com.google.genai.types.LiveServerMessage; | ||
| import com.google.genai.types.Part; | ||
| import com.google.genai.types.PrebuiltVoiceConfig; | ||
| import com.google.genai.types.SpeechConfig; | ||
| import com.google.genai.types.VoiceConfig; | ||
| import java.io.ByteArrayInputStream; | ||
| import java.io.ByteArrayOutputStream; | ||
| import java.io.IOException; | ||
| import java.nio.file.Path; | ||
| import java.nio.file.Paths; | ||
| import java.util.concurrent.CompletableFuture; | ||
| import javax.sound.sampled.AudioFileFormat; | ||
| import javax.sound.sampled.AudioFormat; | ||
| import javax.sound.sampled.AudioInputStream; | ||
| import javax.sound.sampled.AudioSystem; | ||
|
|
||
| public class LiveAudioWithTxt { | ||
|
|
||
| public static void main(String[] args) { | ||
| // TODO(developer): Replace these variables before running the sample. | ||
| String modelId = "gemini-2.0-flash-live-preview-04-09"; | ||
| generateContent(modelId); | ||
| } | ||
|
|
||
| // Shows how to get voice responses from text input. | ||
| public static void generateContent(String modelId) { | ||
| // Client Initialization. Once created, it can be reused for multiple requests. | ||
| try (Client client = Client.builder().location("us-central1").vertexAI(true).build()) { | ||
|
|
||
| LiveConnectConfig liveConnectConfig = | ||
| LiveConnectConfig.builder() | ||
| .responseModalities(AUDIO) | ||
| .speechConfig( | ||
| SpeechConfig.builder() | ||
| .voiceConfig( | ||
| VoiceConfig.builder() | ||
| .prebuiltVoiceConfig( | ||
| PrebuiltVoiceConfig.builder().voiceName("Aoede").build()) | ||
| .build()) | ||
| .build()) | ||
| .build(); | ||
|
|
||
| // Connects to the live server. | ||
| CompletableFuture<AsyncSession> sessionFuture = | ||
| client.async.live.connect(modelId, liveConnectConfig); | ||
|
|
||
| // Sends content and receives response from the live server. | ||
| sessionFuture | ||
| .thenCompose( | ||
| session -> { | ||
| // A future that completes when the model signals the end of its turn. | ||
| CompletableFuture<Void> turnComplete = new CompletableFuture<>(); | ||
| // A buffer to collect all incoming audio chunks. | ||
| ByteArrayOutputStream audioBuffer = new ByteArrayOutputStream(); | ||
| // Starts receiving messages from the live session. | ||
| session.receive( | ||
| message -> handleLiveServerMessage(message, turnComplete, audioBuffer)); | ||
| // Sends content to the live session and waits for the turn to complete. | ||
| return sendContent(session) | ||
| .thenCompose(unused -> turnComplete) | ||
| .thenAccept( | ||
| unused -> { | ||
| byte[] audio = audioBuffer.toByteArray(); | ||
| if (audio.length > 0) { | ||
| saveAudioToFile(audio); | ||
| } | ||
| }) | ||
| .thenCompose(unused -> session.close()); | ||
| }) | ||
| .join(); | ||
| // Example response: | ||
| // > Answer to this audio url | ||
| // Successfully saved audio to... | ||
| } | ||
| } | ||
|
|
||
| // Sends content to the live session. | ||
| private static CompletableFuture<Void> sendContent(AsyncSession session) { | ||
| String textInput = "Hello? Gemini, are you there?"; | ||
| System.out.printf("> %s\n", textInput); | ||
| return session.sendClientContent( | ||
| LiveSendClientContentParameters.builder() | ||
| .turns(Content.builder().role("user").parts(Part.fromText(textInput)).build()) | ||
| .turnComplete(true) | ||
| .build()); | ||
| } | ||
|
|
||
| // Writes the inline data response to the audio buffer and signals | ||
| // `turnComplete` when the model is done generating the response. | ||
| private static void handleLiveServerMessage( | ||
| LiveServerMessage message, | ||
| CompletableFuture<Void> turnComplete, | ||
| ByteArrayOutputStream audioBuffer) { | ||
| message | ||
| .serverContent() | ||
| .flatMap(LiveServerContent::modelTurn) | ||
| .flatMap(Content::parts) | ||
| .ifPresent( | ||
| parts -> | ||
| parts.forEach( | ||
| part -> { | ||
| // When an audio blob is present, write its data to the buffer. | ||
| part.inlineData() | ||
| .flatMap(Blob::data) | ||
| .ifPresent( | ||
| data -> { | ||
| try { | ||
| audioBuffer.write(data); | ||
| } catch (IOException e) { | ||
| System.out.println( | ||
| "Error writing to audio buffer: " + e.getMessage()); | ||
| } | ||
| }); | ||
| })); | ||
|
|
||
| // Checks if the model's turn is over. | ||
| if (message.serverContent().flatMap(LiveServerContent::turnComplete).orElse(false)) { | ||
| turnComplete.complete(null); | ||
| } | ||
| } | ||
|
|
||
| private static void saveAudioToFile(byte[] audioData) { | ||
| try { | ||
| // Defines the audio format. | ||
| AudioFormat format = new AudioFormat(24000, 16, 1, true, false); | ||
| // Creates an AudioInputStream from the raw audio data and the format. | ||
| AudioInputStream audioStream = | ||
| new AudioInputStream( | ||
| new ByteArrayInputStream(audioData), | ||
| format, | ||
| audioData.length / format.getFrameSize()); | ||
|
|
||
| Path outputPath = Paths.get("resources/output/output_audio.wav"); | ||
jdomingr marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| AudioSystem.write(audioStream, AudioFileFormat.Type.WAVE, outputPath.toFile()); | ||
| System.out.println("Successfully saved audio to: " + outputPath.toAbsolutePath()); | ||
| } catch (IOException e) { | ||
| System.err.println("Error saving audio file: " + e.getMessage()); | ||
| } | ||
| } | ||
| } | ||
| // [END googlegenaisdk_live_audio_with_txt] | ||
191 changes: 191 additions & 0 deletions
191
genai/snippets/src/main/java/genai/live/LiveConversationAudioWithAudio.java
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,191 @@ | ||
| /* | ||
| * Copyright 2025 Google LLC | ||
| * | ||
| * Licensed under the Apache License, Version 2.0 (the "License"); | ||
| * you may not use this file except in compliance with the License. | ||
| * You may obtain a copy of the License at | ||
| * | ||
| * http://www.apache.org/licenses/LICENSE-2.0 | ||
| * | ||
| * Unless required by applicable law or agreed to in writing, software | ||
| * distributed under the License is distributed on an "AS IS" BASIS, | ||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| * See the License for the specific language governing permissions and | ||
| * limitations under the License. | ||
| */ | ||
|
|
||
| package genai.live; | ||
|
|
||
| // [START googlegenaisdk_live_conversation_audio_with_audio] | ||
|
|
||
| import static com.google.genai.types.Modality.Known.AUDIO; | ||
|
|
||
| import com.google.genai.AsyncSession; | ||
| import com.google.genai.Client; | ||
| import com.google.genai.types.AudioTranscriptionConfig; | ||
| import com.google.genai.types.Blob; | ||
| import com.google.genai.types.Content; | ||
| import com.google.genai.types.HttpOptions; | ||
| import com.google.genai.types.LiveConnectConfig; | ||
| import com.google.genai.types.LiveSendRealtimeInputParameters; | ||
| import com.google.genai.types.LiveServerMessage; | ||
| import com.google.genai.types.Transcription; | ||
| import java.io.ByteArrayInputStream; | ||
| import java.io.ByteArrayOutputStream; | ||
| import java.io.IOException; | ||
| import java.nio.file.Files; | ||
| import java.nio.file.Path; | ||
| import java.nio.file.Paths; | ||
| import java.util.concurrent.CompletableFuture; | ||
| import javax.sound.sampled.AudioFileFormat; | ||
| import javax.sound.sampled.AudioFormat; | ||
| import javax.sound.sampled.AudioInputStream; | ||
| import javax.sound.sampled.AudioSystem; | ||
|
|
||
| public class LiveConversationAudioWithAudio { | ||
|
|
||
| public static void main(String[] args) throws IOException { | ||
| // TODO(developer): Replace these variables before running the sample. | ||
| String modelId = "gemini-live-2.5-flash-preview-native-audio-09-2025"; | ||
jdomingr marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| generateContent(modelId); | ||
| } | ||
|
|
||
| // Shows how to get an audio response from an audio input. | ||
| public static void generateContent(String modelId) throws IOException { | ||
| // Client Initialization. Once created, it can be reused for multiple requests. | ||
| try (Client client = | ||
| Client.builder() | ||
| .location("us-central1") | ||
| .vertexAI(true) | ||
| .httpOptions(HttpOptions.builder().apiVersion("v1beta1").build()) | ||
| .build()) { | ||
|
|
||
| // Reads the local audio file. | ||
| byte[] audioBytes = Files.readAllBytes(Paths.get("resources/hello_gemini_are_you_there.wav")); | ||
jdomingr marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| LiveConnectConfig liveConnectConfig = | ||
| LiveConnectConfig.builder() | ||
| // Set Model responses to be in Audio. | ||
| .responseModalities(AUDIO) | ||
| // To generate transcript for input audio. | ||
| .inputAudioTranscription(AudioTranscriptionConfig.builder().build()) | ||
| // To generate transcript for output audio | ||
| .outputAudioTranscription(AudioTranscriptionConfig.builder().build()) | ||
| .build(); | ||
|
|
||
| // Connects to the live server. | ||
| CompletableFuture<AsyncSession> sessionFuture = | ||
| client.async.live.connect(modelId, liveConnectConfig); | ||
|
|
||
| // Sends content and receives response from the live server. | ||
| sessionFuture | ||
| .thenCompose( | ||
| session -> { | ||
| // A future that completes when the model signals the end of its turn. | ||
| CompletableFuture<Void> turnComplete = new CompletableFuture<>(); | ||
| // A buffer to collect all incoming audio chunks. | ||
| ByteArrayOutputStream audioBuffer = new ByteArrayOutputStream(); | ||
| // Starts receiving messages from the live session. | ||
| session.receive( | ||
| message -> handleLiveServerMessage(message, turnComplete, audioBuffer)); | ||
| // Sends content to the live session and waits for the turn to complete. | ||
| return sendAudio(session, audioBytes) | ||
| .thenCompose(unused -> turnComplete) | ||
| .thenAccept( | ||
| unused -> { | ||
| byte[] audio = audioBuffer.toByteArray(); | ||
| if (audio.length > 0) { | ||
| saveAudioToFile(audio); | ||
| } | ||
| }) | ||
| .thenCompose(unused -> session.close()); | ||
| }) | ||
| .join(); | ||
| // Example output: | ||
| // Input transcription: Hello | ||
| // Input transcription: . | ||
| // Output transcription: Hello there! | ||
| // Output transcription: How can | ||
| // Output transcription: I help | ||
| // Output transcription: you today? | ||
| // Successfully saved audio to... | ||
| } | ||
| } | ||
|
|
||
| // Sends content to the live session. | ||
| private static CompletableFuture<Void> sendAudio(AsyncSession session, byte[] audioBytes) { | ||
| return session.sendRealtimeInput( | ||
| LiveSendRealtimeInputParameters.builder() | ||
| .audio(Blob.builder().data(audioBytes).mimeType("audio/pcm;rate=16000").build()) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The MIME type and audio rate are hardcoded. These values might vary depending on the audio source. It would be better to derive them from the .audio(Blob.builder().data(audioBytes).mimeType("audio/pcm;rate=16000").build()) // Consider making mimeType and rate dynamic |
||
| .build()); | ||
| } | ||
|
|
||
| // Prints the transcription and writes the inline data response to the audio buffer. | ||
| // Signals `turnComplete` when the model is done generating the response. | ||
| private static void handleLiveServerMessage( | ||
| LiveServerMessage message, | ||
| CompletableFuture<Void> turnComplete, | ||
| ByteArrayOutputStream audioBuffer) { | ||
|
|
||
| message | ||
| .serverContent() | ||
| .ifPresent( | ||
| serverContent -> { | ||
| serverContent | ||
| .inputTranscription() | ||
| .flatMap(Transcription::text) | ||
| .ifPresent(text -> System.out.println("Input transcription: " + text)); | ||
|
|
||
| serverContent | ||
| .outputTranscription() | ||
| .flatMap(Transcription::text) | ||
| .ifPresent(text -> System.out.println("Output transcription: " + text)); | ||
|
|
||
| serverContent | ||
| .modelTurn() | ||
| .flatMap(Content::parts) | ||
| .ifPresent( | ||
| parts -> | ||
| parts.forEach( | ||
| part -> { | ||
| // When an audio blob is present, write its data to the buffer. | ||
| part.inlineData() | ||
| .flatMap(Blob::data) | ||
| .ifPresent( | ||
| data -> { | ||
| try { | ||
| audioBuffer.write(data); | ||
| } catch (IOException e) { | ||
| System.out.println( | ||
| "Error writing to audio buffer: " + e.getMessage()); | ||
| } | ||
| }); | ||
| })); | ||
|
|
||
| // Checks if the model's turn is over. | ||
| if (serverContent.turnComplete().orElse(false)) { | ||
| turnComplete.complete(null); | ||
| } | ||
| }); | ||
| } | ||
|
|
||
| private static void saveAudioToFile(byte[] audioData) { | ||
| try { | ||
| // Defines the audio format. | ||
| AudioFormat format = new AudioFormat(24000, 16, 1, true, false); | ||
| // Creates an AudioInputStream from the raw audio data and the format. | ||
| AudioInputStream audioStream = | ||
| new AudioInputStream( | ||
| new ByteArrayInputStream(audioData), | ||
| format, | ||
| audioData.length / format.getFrameSize()); | ||
|
|
||
| Path outputPath = Paths.get("resources/output/example_model_response.wav"); | ||
jdomingr marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| AudioSystem.write(audioStream, AudioFileFormat.Type.WAVE, outputPath.toFile()); | ||
| System.out.println("Successfully saved audio to: " + outputPath.toAbsolutePath()); | ||
| } catch (IOException e) { | ||
| System.err.println("Error saving audio file: " + e.getMessage()); | ||
| } | ||
| } | ||
| } | ||
| // [END googlegenaisdk_live_conversation_audio_with_audio] | ||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.