Generate text with Gemini Live API

This sample demonstrates how to generate text using Gemini Live API

Explore further

For detailed documentation that includes this code sample, see the following:

Code sample

Java

Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

  import static 
  
 com.google.genai.types.Modality.Known.TEXT 
 ; 
 import 
  
 com.google.genai.AsyncSession 
 ; 
 import 
  
 com.google.genai.Client 
 ; 
 import 
  
 com.google.genai.types.Content 
 ; 
 import 
  
 com.google.genai.types.HttpOptions 
 ; 
 import 
  
 com.google.genai.types.LiveConnectConfig 
 ; 
 import 
  
 com.google.genai.types.LiveSendClientContentParameters 
 ; 
 import 
  
 com.google.genai.types.LiveServerContent 
 ; 
 import 
  
 com.google.genai.types.LiveServerMessage 
 ; 
 import 
  
 com.google.genai.types.Part 
 ; 
 import 
  
 java.util.concurrent.CompletableFuture 
 ; 
 public 
  
 class 
 LiveWithTxt 
  
 { 
  
 public 
  
 static 
  
 void 
  
 main 
 ( 
 String 
 [] 
  
 args 
 ) 
  
 { 
  
 // TODO(developer): Replace these variables before running the sample. 
  
 String 
  
 modelId 
  
 = 
  
 "gemini-2.0-flash-live-preview-04-09" 
 ; 
  
 generateContent 
 ( 
 modelId 
 ); 
  
 } 
  
 // Shows how to send a text prompt and receive messages from the live session. 
  
 public 
  
 static 
  
 String 
  
 generateContent 
 ( 
 String 
  
 modelId 
 ) 
  
 { 
  
 // Client Initialization. Once created, it can be reused for multiple requests. 
  
 try 
  
 ( 
 Client 
  
 client 
  
 = 
  
 Client 
 . 
 builder 
 () 
  
 . 
 location 
 ( 
 "us-central1" 
 ) 
  
 . 
 vertexAI 
 ( 
 true 
 ) 
  
 . 
 httpOptions 
 ( 
 HttpOptions 
 . 
 builder 
 (). 
 apiVersion 
 ( 
 "v1beta1" 
 ). 
 build 
 ()) 
  
 . 
 build 
 ()) 
  
 { 
  
 // Connects to the live server. 
  
 CompletableFuture<AsyncSession> 
  
 sessionFuture 
  
 = 
  
 client 
 . 
 async 
 . 
 live 
 . 
 connect 
 ( 
  
 modelId 
 , 
  
 LiveConnectConfig 
 . 
 builder 
 (). 
 responseModalities 
 ( 
 TEXT 
 ). 
 build 
 ()); 
  
 // Sends and receives messages from the live session. 
  
 CompletableFuture<String> 
  
 responseFuture 
  
 = 
  
 sessionFuture 
 . 
 thenCompose 
 ( 
  
 session 
  
 - 
>  
 { 
  
 // A future that completes when the model signals the end of its turn. 
  
 CompletableFuture<Void> 
  
 turnComplete 
  
 = 
  
 new 
  
 CompletableFuture 
<> (); 
  
 // A variable to concatenate the text response from the model. 
  
 StringBuilder 
  
 modelResponse 
  
 = 
  
 new 
  
 StringBuilder 
 (); 
  
 // Starts receiving messages from the live session. 
  
 session 
 . 
 receive 
 ( 
  
 message 
  
 - 
>  
 handleLiveServerMessage 
 ( 
 message 
 , 
  
 turnComplete 
 , 
  
 modelResponse 
 )); 
  
 // Sends content to the live session and waits for the turn to complete. 
  
 return 
  
 sendContent 
 ( 
 session 
 ) 
  
 . 
 thenCompose 
 ( 
 unused 
  
 - 
>  
 turnComplete 
 ) 
  
 . 
 thenCompose 
 ( 
  
 unused 
  
 - 
>  
 session 
 . 
 close 
 (). 
 thenApply 
 ( 
 result 
  
 - 
>  
 modelResponse 
 . 
 toString 
 ())); 
  
 }); 
  
 String 
  
 response 
  
 = 
  
 responseFuture 
 . 
 join 
 (); 
  
 System 
 . 
 out 
 . 
 println 
 ( 
 response 
 ); 
  
 // Example output: 
  
 // > Hello? Gemini, are you there? 
  
 // 
  
 // Yes, I am here. How can I help you today? 
  
 return 
  
 response 
 ; 
  
 } 
  
 } 
  
 // Sends content to the live session. 
  
 private 
  
 static 
  
 CompletableFuture<Void> 
  
 sendContent 
 ( 
 AsyncSession 
  
 session 
 ) 
  
 { 
  
 String 
  
 textInput 
  
 = 
  
 "Hello? Gemini, are you there?" 
 ; 
  
 System 
 . 
 out 
 . 
 printf 
 ( 
 "> %s\n" 
 , 
  
 textInput 
 ); 
  
 return 
  
 session 
 . 
 sendClientContent 
 ( 
  
 LiveSendClientContentParameters 
 . 
 builder 
 () 
  
 . 
 turns 
 ( 
 Content 
 . 
 builder 
 (). 
 role 
 ( 
 "user" 
 ). 
 parts 
 ( 
 Part 
 . 
 fromText 
 ( 
 textInput 
 )). 
 build 
 ()) 
  
 . 
 turnComplete 
 ( 
 true 
 ) 
  
 . 
 build 
 ()); 
  
 } 
  
 // Concatenates the output transcription from the model and signals 
  
 // `turnComplete` when the model is done generating the response. 
  
 private 
  
 static 
  
 void 
  
 handleLiveServerMessage 
 ( 
  
 LiveServerMessage 
  
 message 
 , 
  
 CompletableFuture<Void> 
  
 turnComplete 
 , 
  
 StringBuilder 
  
 response 
 ) 
  
 { 
  
 message 
  
 . 
 serverContent 
 () 
  
 . 
 flatMap 
 ( 
 LiveServerContent 
 :: 
 modelTurn 
 ) 
  
 . 
 flatMap 
 ( 
 Content 
 :: 
 parts 
 ) 
  
 . 
 ifPresent 
 ( 
 parts 
  
 - 
>  
 parts 
 . 
 forEach 
 ( 
 part 
  
 - 
>  
 part 
 . 
 text 
 (). 
 ifPresent 
 ( 
 response 
 :: 
 append 
 ))); 
  
 // Checks if the model's turn is over. 
  
 if 
  
 ( 
 message 
 . 
 serverContent 
 (). 
 flatMap 
 ( 
 LiveServerContent 
 :: 
 turnComplete 
 ). 
 orElse 
 ( 
 false 
 )) 
  
 { 
  
 turnComplete 
 . 
 complete 
 ( 
 null 
 ); 
  
 } 
  
 } 
 } 
 

Node.js

Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

  'use strict' 
 ; 
 const 
  
 { 
 GoogleGenAI 
 , 
  
 Modality 
 } 
  
 = 
  
 require 
 ( 
 '@google/genai' 
 ); 
 const 
  
 GOOGLE_CLOUD_PROJECT 
  
 = 
  
 process 
 . 
 env 
 . 
 GOOGLE_CLOUD_PROJECT 
 ; 
 const 
  
 GOOGLE_CLOUD_LOCATION 
  
 = 
  
 process 
 . 
 env 
 . 
 GOOGLE_CLOUD_LOCATION 
  
 || 
  
 'global' 
 ; 
 async 
  
 function 
  
 generateLiveConversation 
 ( 
  
 projectId 
  
 = 
  
 GOOGLE_CLOUD_PROJECT 
 , 
  
 location 
  
 = 
  
 GOOGLE_CLOUD_LOCATION 
 ) 
  
 { 
  
 const 
  
 client 
  
 = 
  
 new 
  
 GoogleGenAI 
 ({ 
  
 vertexai 
 : 
  
 true 
 , 
  
 project 
 : 
  
 projectId 
 , 
  
 location 
 : 
  
 location 
 , 
  
 }); 
  
 const 
  
 modelId 
  
 = 
  
 'gemini-2.0-flash-live-preview-04-09' 
 ; 
  
 const 
  
 config 
  
 = 
  
 { 
 responseModalities 
 : 
  
 [ 
 Modality 
 . 
 TEXT 
 ]}; 
  
 const 
  
 responseQueue 
  
 = 
  
 []; 
  
 async 
  
 function 
  
 waitMessage 
 () 
  
 { 
  
 while 
  
 ( 
 responseQueue 
 . 
 length 
  
 === 
  
 0 
 ) 
  
 { 
  
 await 
  
 new 
  
 Promise 
 ( 
 resolve 
  
 = 
>  
 setTimeout 
 ( 
 resolve 
 , 
  
 100 
 )); 
  
 } 
  
 return 
  
 responseQueue 
 . 
 shift 
 (); 
  
 } 
  
 async 
  
 function 
  
 handleTurn 
 () 
  
 { 
  
 const 
  
 turns 
  
 = 
  
 []; 
  
 let 
  
 done 
  
 = 
  
 false 
 ; 
  
 while 
  
 ( 
 ! 
 done 
 ) 
  
 { 
  
 const 
  
 message 
  
 = 
  
 await 
  
 waitMessage 
 (); 
  
 turns 
 . 
 push 
 ( 
 message 
 ); 
  
 if 
  
 ( 
 message 
 . 
 serverContent 
 && 
 message 
 . 
 serverContent 
 . 
 turnComplete 
 ) 
  
 { 
  
 done 
  
 = 
  
 true 
 ; 
  
 } 
  
 } 
  
 return 
  
 turns 
 ; 
  
 } 
  
 const 
  
 session 
  
 = 
  
 await 
  
 client 
 . 
 live 
 . 
 connect 
 ({ 
  
 model 
 : 
  
 modelId 
 , 
  
 config 
 : 
  
 config 
 , 
  
 callbacks 
 : 
  
 { 
  
 onmessage 
 : 
  
 msg 
  
 = 
>  
 responseQueue 
 . 
 push 
 ( 
 msg 
 ), 
  
 onerror 
 : 
  
 e 
  
 = 
>  
 console 
 . 
 error 
 ( 
 'Error:' 
 , 
  
 e 
 . 
 message 
 ), 
  
 }, 
  
 }); 
  
 const 
  
 textInput 
  
 = 
  
 'Hello? Gemini, are you there?' 
 ; 
  
 console 
 . 
 log 
 ( 
 '> ' 
 , 
  
 textInput 
 , 
  
 '\n' 
 ); 
  
 await 
  
 session 
 . 
 sendClientContent 
 ({ 
  
 turns 
 : 
  
 [{ 
 role 
 : 
  
 'user' 
 , 
  
 parts 
 : 
  
 [{ 
 text 
 : 
  
 textInput 
 }]}], 
  
 }); 
  
 const 
  
 turns 
  
 = 
  
 await 
  
 handleTurn 
 (); 
  
 for 
  
 ( 
 const 
  
 turn 
  
 of 
  
 turns 
 ) 
  
 { 
  
 if 
  
 ( 
 turn 
 . 
 text 
 ) 
  
 { 
  
 console 
 . 
 log 
 ( 
 'Received text:' 
 , 
  
 turn 
 . 
 text 
 ); 
  
 } 
  
 } 
  
 // Example output: 
  
 //> Hello? Gemini, are you there? 
  
 // Received text: Yes 
  
 // Received text: I'm here. How can I help you today? 
  
 session 
 . 
 close 
 (); 
  
 return 
  
 turns 
 ; 
 } 
 

Python

Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

  from 
  
 google 
  
 import 
 genai 
 from 
  
 google.genai.types 
  
 import 
 ( 
 Content 
 , 
 HttpOptions 
 , 
 LiveConnectConfig 
 , 
 Modality 
 , 
 Part 
 ) 
 client 
 = 
 genai 
 . 
 Client 
 ( 
 http_options 
 = 
 HttpOptions 
 ( 
 api_version 
 = 
 "v1beta1" 
 )) 
 model_id 
 = 
 "gemini-2.0-flash-live-preview-04-09" 
 async 
 with 
 client 
 . 
 aio 
 . 
 live 
 . 
 connect 
 ( 
 model 
 = 
 model_id 
 , 
 config 
 = 
 LiveConnectConfig 
 ( 
 response_modalities 
 = 
 [ 
 Modality 
 . 
 TEXT 
 ]), 
 ) 
 as 
 session 
 : 
 text_input 
 = 
 "Hello? Gemini, are you there?" 
 print 
 ( 
 "> " 
 , 
 text_input 
 , 
 " 
 \n 
 " 
 ) 
 await 
 session 
 . 
 send_client_content 
 ( 
 turns 
 = 
 Content 
 ( 
 role 
 = 
 "user" 
 , 
 parts 
 = 
 [ 
 Part 
 ( 
 text 
 = 
 text_input 
 )]) 
 ) 
 response 
 = 
 [] 
 async 
 for 
 message 
 in 
 session 
 . 
 receive 
 (): 
 if 
 message 
 . 
 text 
 : 
 response 
 . 
 append 
 ( 
 message 
 . 
 text 
 ) 
 print 
 ( 
 "" 
 . 
 join 
 ( 
 response 
 )) 
 # Example output: 
 # >  Hello? Gemini, are you there? 
 # Yes, I'm here. What would you like to talk about? 
 

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.

Design a Mobile Site
View Site in Mobile | Classic
Share by: