The Gemini Live API enables low-latency bidirectional text and voice interactions with Gemini. Using the Live API, you can provide end users with the experience of natural, human-like voice conversations, with the ability to interrupt the model's responses using text or voice commands. The model can process text and audio input (video coming soon!), and it can provide text and audio output.
You can prototype with prompts and the Live API in Google AI Studio or Vertex AI Studio.
The Live API is a stateful API that creates a WebSocket connection to establish a session between the client and the Gemini server. For details, see the Live API reference documentation (Gemini Developer API | Vertex AI Gemini API).
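In practice, every example on this page follows the same session lifecycle: connect to open the session, send input, collect the streamed responses, then stop receiving. A condensed Kotlin sketch of that lifecycle, using the same calls as the full examples below:
// Condensed sketch of the session lifecycle (see the full examples later on this page).
val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
    modelName = "gemini-2.0-flash-live-preview-04-09",
    generationConfig = liveGenerationConfig { responseModality = ResponseModality.TEXT }
)

val session = model.connect()   // Open the WebSocket-backed session
session.send("hello")           // Stream input to the model

session.receive().collect {     // Consume the streamed responses
    if (it.turnComplete) session.stopReceiving()
}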
Before you begin
Click your Gemini API provider to view provider-specific content and code on this page.
If you haven't already, complete the getting started guide, which describes how to set up your Firebase project, connect your app to Firebase, add the SDK, initialize the backend service for your chosen Gemini API provider, and create a LiveModel instance.
Models that support this capability
The models that support the Live API depend on your chosen Gemini API provider. Note that regardless of the API provider, the gemini-2.0-flash and gemini-2.5-flash models do not support the Live API.
- Gemini Developer API
  - gemini-live-2.5-flash (private GA*)
  - gemini-live-2.5-flash-preview
  - gemini-2.0-flash-live-001
  - gemini-2.0-flash-live-preview-04-09
- Vertex AI Gemini API
  - gemini-live-2.5-flash (private GA*)
  - gemini-2.0-flash-live-preview-04-09 (only available to access in us-central1)
Note that in the 2.5 model names for the Live API, the live segment immediately follows the gemini segment.
* Reach out to your Google Cloud account team representative to request access.
Use the standard features of the Live API
This section describes how to use the standard features of the Live API, specifically to stream various types of inputs and outputs:
- Send text and receive text
- Send audio and receive audio
- Send audio and receive text
- Send text and receive audio
Generate streamed text from streamed text input
Before trying this sample, make sure that you've completed the Before you begin section of this guide; in that section, you'll also click a button for your chosen Gemini API provider so that you see provider-specific content on this page.
You can send streamed text input and receive streamed text output. Make sure to create a liveModel instance and set the response modality to Text.
Swift
The Live API is not yet supported for Apple platform apps, but check back soon!
Kotlin
// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
    modelName = "gemini-2.0-flash-live-preview-04-09",
    // Configure the model to respond with text
    generationConfig = liveGenerationConfig {
        responseModality = ResponseModality.TEXT
    }
)

val session = model.connect()

// Provide a text prompt
val text = "tell a short story"

session.send(text)

var outputText = ""

session.receive().collect {
    if (it.turnComplete) {
        // Optional: stop receiving if you don't need to send more requests.
        session.stopReceiving()
    }
    outputText = outputText + it.text
}

// Output received from the server.
println(outputText)
Java
ExecutorService executor = Executors.newFixedThreadPool(1);

// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
LiveGenerativeModel lm = FirebaseAI.getInstance(GenerativeBackend.googleAI()).liveModel(
        "gemini-2.0-flash-live-preview-04-09",
        // Configure the model to respond with text
        new LiveGenerationConfig.Builder()
            .setResponseModalities(ResponseModality.TEXT)
            .build()
);
LiveModelFutures model = LiveModelFutures.from(lm);

ListenableFuture<LiveSession> sessionFuture = model.connect();

class LiveContentResponseSubscriber implements Subscriber<LiveContentResponse> {
    @Override
    public void onSubscribe(Subscription s) {
        s.request(Long.MAX_VALUE); // Request an unlimited number of items
    }

    @Override
    public void onNext(LiveContentResponse liveContentResponse) {
        // Handle the response from the server.
        System.out.println(liveContentResponse.getText());
    }

    @Override
    public void onError(Throwable t) {
        System.err.println("Error: " + t.getMessage());
    }

    @Override
    public void onComplete() {
        System.out.println("Done receiving messages!");
    }
}

Futures.addCallback(sessionFuture, new FutureCallback<LiveSession>() {
    @Override
    public void onSuccess(LiveSession ses) {
        LiveSessionFutures session = LiveSessionFutures.from(ses);

        // Provide a text prompt
        String text = "tell me a short story?";
        session.send(text);

        Publisher<LiveContentResponse> publisher = session.receive();
        publisher.subscribe(new LiveContentResponseSubscriber());
    }

    @Override
    public void onFailure(Throwable t) {
        // Handle exceptions
    }
}, executor);
Web
// Initialize the Gemini Developer API backend service
const ai = getAI(firebaseApp, { backend: new GoogleAIBackend() });

// Create a `LiveGenerativeModel` instance with a model that supports the Live API
const model = getLiveGenerativeModel(ai, {
  model: "gemini-2.0-flash-live-preview-04-09",
  // Configure the model to respond with text
  generationConfig: {
    responseModalities: [ResponseModality.TEXT],
  },
});

const session = await model.connect();

// Provide a text prompt
const prompt = "tell a short story";
session.send(prompt);

// Collect text from the model's turn
let text = "";

const messages = session.receive();
for await (const message of messages) {
  switch (message.type) {
    case "serverContent":
      if (message.turnComplete) {
        console.log(text);
      } else {
        const parts = message.modelTurn?.parts;
        if (parts) {
          text += parts.map((part) => part.text).join("");
        }
      }
      break;
    case "toolCall":
      // Ignore
    case "toolCallCancellation":
      // Ignore
  }
}
Dart
import 'package:firebase_ai/firebase_ai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

late LiveModelSession _session;

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
final model = FirebaseAI.googleAI().liveModel(
  model: 'gemini-2.0-flash-live-preview-04-09',
  // Configure the model to respond with text
  config: LiveGenerationConfig(responseModalities: [ResponseModality.text]),
);

_session = await model.connect();

// Provide a text prompt
final prompt = Content.text('tell a short story');
await _session.send(input: prompt, turnComplete: true);

// In a separate thread, receive the response
await for (final message in _session.receive()) {
  // Process the received message
}
Unity
using Firebase;
using Firebase.AI;

async Task SendTextReceiveText() {
  // Initialize the Gemini Developer API backend service
  // Create a `LiveModel` instance with a model that supports the Live API
  var model = FirebaseAI.GetInstance(FirebaseAI.Backend.GoogleAI()).GetLiveModel(
    modelName: "gemini-2.0-flash-live-preview-04-09",
    // Configure the model to respond with text
    liveGenerationConfig: new LiveGenerationConfig(
        responseModalities: new[] { ResponseModality.Text })
  );

  LiveSession session = await model.ConnectAsync();

  // Provide a text prompt
  var prompt = ModelContent.Text("tell a short story");

  await session.SendAsync(content: prompt, turnComplete: true);

  // Receive the response
  await foreach (var message in session.ReceiveAsync()) {
    // Process the received message
    if (!string.IsNullOrEmpty(message.Text)) {
      UnityEngine.Debug.Log("Received message: " + message.Text);
    }
  }
}
Generate streamed audio from streamed audio input
Before trying this sample, make sure that you've completed the Before you begin section of this guide; in that section, you'll also click a button for your chosen Gemini API provider so that you see provider-specific content on this page.
You can send streamed audio input and receive streamed audio output. Make sure to create a LiveModel instance and set the response modality to Audio.
Learn how to configure and customize the response voice (later on this page).
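For example, here is a minimal Kotlin sketch of selecting a prebuilt voice, assuming the live generation config exposes a speech configuration with a named voice; treat the SpeechConfig and Voice names and the voice name itself as assumptions, and see the voice configuration section later on this page for the exact options:
// Sketch only: `SpeechConfig`, `Voice`, and the voice name are assumptions here;
// see the voice configuration section later on this page for the exact API.
val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
    modelName = "gemini-2.0-flash-live-preview-04-09",
    generationConfig = liveGenerationConfig {
        responseModality = ResponseModality.AUDIO
        speechConfig = SpeechConfig(voice = Voice("FENRIR"))
    }
)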
Swift
The Live API is not yet supported for Apple platform apps, but check back soon!
Kotlin
// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
    modelName = "gemini-2.0-flash-live-preview-04-09",
    // Configure the model to respond with audio
    generationConfig = liveGenerationConfig {
        responseModality = ResponseModality.AUDIO
    }
)

val session = model.connect()

// This is the recommended way to run an audio conversation.
// However, you can create your own recorder and handle the stream yourself.
session.startAudioConversation()
Java
ExecutorService executor = Executors.newFixedThreadPool(1);

// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
LiveGenerativeModel lm = FirebaseAI.getInstance(GenerativeBackend.googleAI()).liveModel(
        "gemini-2.0-flash-live-preview-04-09",
        // Configure the model to respond with audio
        new LiveGenerationConfig.Builder()
            .setResponseModalities(ResponseModality.AUDIO)
            .build()
);
LiveModelFutures model = LiveModelFutures.from(lm);

ListenableFuture<LiveSession> sessionFuture = model.connect();

Futures.addCallback(sessionFuture, new FutureCallback<LiveSession>() {
    @Override
    public void onSuccess(LiveSession ses) {
        LiveSessionFutures session = LiveSessionFutures.from(ses);
        session.startAudioConversation();
    }

    @Override
    public void onFailure(Throwable t) {
        // Handle exceptions
    }
}, executor);
Web
// Initialize the Gemini Developer API backend service
const ai = getAI(firebaseApp, { backend: new GoogleAIBackend() });

// Create a `LiveGenerativeModel` instance with a model that supports the Live API
const model = getLiveGenerativeModel(ai, {
  model: "gemini-2.0-flash-live-preview-04-09",
  // Configure the model to respond with audio
  generationConfig: {
    responseModalities: [ResponseModality.AUDIO],
  },
});

const session = await model.connect();

// Start the audio conversation
const audioConversationController = await startAudioConversation(session);

// ... Later, to stop the audio conversation
// await audioConversationController.stop()
Dart
import 'package:firebase_ai/firebase_ai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';
import 'package:your_audio_recorder_package/your_audio_recorder_package.dart';

late LiveModelSession _session;
final _audioRecorder = YourAudioRecorder();

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
final model = FirebaseAI.googleAI().liveModel(
  model: 'gemini-2.0-flash-live-preview-04-09',
  // Configure the model to respond with audio
  config: LiveGenerationConfig(responseModalities: [ResponseModality.audio]),
);

_session = await model.connect();

final audioRecordStream = _audioRecorder.startRecordingStream();
// Map the Uint8List stream to an InlineDataPart stream
final mediaChunkStream = audioRecordStream.map((data) {
  return InlineDataPart('audio/pcm', data);
});
await _session.startMediaStream(mediaChunkStream);

// In a separate thread, receive the audio response from the model
await for (final message in _session.receive()) {
  // Process the received message
}
Unity
using Firebase;
using Firebase.AI;

async Task SendTextReceiveAudio() {
  // Initialize the Gemini Developer API backend service
  // Create a `LiveModel` instance with a model that supports the Live API
  var model = FirebaseAI.GetInstance(FirebaseAI.Backend.GoogleAI()).GetLiveModel(
    modelName: "gemini-2.0-flash-live-preview-04-09",
    // Configure the model to respond with audio
    liveGenerationConfig: new LiveGenerationConfig(
        responseModalities: new[] { ResponseModality.Audio })
  );

  LiveSession session = await model.ConnectAsync();

  // Start a coroutine to send audio from the Microphone
  var recordingCoroutine = StartCoroutine(SendAudio(session));

  // Start receiving the response
  await ReceiveAudio(session);
}

IEnumerator SendAudio(LiveSession liveSession) {
  string microphoneDeviceName = null;
  int recordingFrequency = 16000;
  int recordingBufferSeconds = 2;

  var recordingClip = Microphone.Start(microphoneDeviceName, true,
      recordingBufferSeconds, recordingFrequency);

  int lastSamplePosition = 0;
  while (true) {
    if (!Microphone.IsRecording(microphoneDeviceName)) {
      yield break;
    }

    int currentSamplePosition = Microphone.GetPosition(microphoneDeviceName);

    if (currentSamplePosition != lastSamplePosition) {
      // The Microphone uses a circular buffer, so we need to check if the
      // current position wrapped around to the beginning, and handle it
      // accordingly.
      int sampleCount;
      if (currentSamplePosition > lastSamplePosition) {
        sampleCount = currentSamplePosition - lastSamplePosition;
      } else {
        sampleCount = recordingClip.samples - lastSamplePosition + currentSamplePosition;
      }

      if (sampleCount > 0) {
        // Get the audio chunk
        float[] samples = new float[sampleCount];
        recordingClip.GetData(samples, lastSamplePosition);

        // Send the data, discarding the resulting Task to avoid the warning
        _ = liveSession.SendAudioAsync(samples);

        lastSamplePosition = currentSamplePosition;
      }
    }

    // Wait for a short delay before reading the next sample from the Microphone
    const float MicrophoneReadDelay = 0.5f;
    yield return new WaitForSeconds(MicrophoneReadDelay);
  }
}

Queue<float> audioBuffer = new();

async Task ReceiveAudio(LiveSession liveSession) {
  int sampleRate = 24000;
  int channelCount = 1;

  // Create a looping AudioClip to fill with the received audio data
  int bufferSamples = (int)(sampleRate * channelCount);
  AudioClip clip = AudioClip.Create("StreamingPCM", bufferSamples, channelCount,
      sampleRate, true, OnAudioRead);

  // Attach the clip to an AudioSource and start playing it
  AudioSource audioSource = GetComponent<AudioSource>();
  audioSource.clip = clip;
  audioSource.loop = true;
  audioSource.Play();

  // Start receiving the response
  await foreach (var message in liveSession.ReceiveAsync()) {
    // Process the received message
    foreach (float[] pcmData in message.AudioAsFloat) {
      lock (audioBuffer) {
        foreach (float sample in pcmData) {
          audioBuffer.Enqueue(sample);
        }
      }
    }
  }
}

// This method is called by the AudioClip to load audio data.
private void OnAudioRead(float[] data) {
  int samplesToProvide = data.Length;
  int samplesProvided = 0;

  lock (audioBuffer) {
    while (samplesProvided < samplesToProvide && audioBuffer.Count > 0) {
      data[samplesProvided] = audioBuffer.Dequeue();
      samplesProvided++;
    }
  }

  while (samplesProvided < samplesToProvide) {
    data[samplesProvided] = 0.0f;
    samplesProvided++;
  }
}
Generate streamed text from streamed audio input
Before trying this sample, make sure that you've completed the Before you begin section of this guide; in that section, you'll also click a button for your chosen Gemini API provider so that you see provider-specific content on this page.
You can send streamed audio input and receive streamed text output. Make sure to create a LiveModel instance and set the response modality to Text.
Swift
The Live API is not yet supported for Apple platform apps, but check back soon!
Kotlin
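The Kotlin flow for this combination mirrors the text-to-text Kotlin example above, with the text prompt replaced by inline audio data. Here is a minimal sketch, assuming audioData already holds 16-bit PCM audio as a ByteArray and that the Kotlin content builder takes an inline-data part the same way the Java addInlineData(audioData, "audio/pcm") call below does; treat that builder call as an assumption:
// Sketch only: assumes `audioData` is a ByteArray of 16-bit PCM audio, and that
// `content { inlineData(...) }` mirrors the Java `addInlineData(data, mimeType)` call.
val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
    modelName = "gemini-2.0-flash-live-preview-04-09",
    // Configure the model to respond with text
    generationConfig = liveGenerationConfig {
        responseModality = ResponseModality.TEXT
    }
)

val session = model.connect()

// Send the audio as an inline-data part
session.send(content { inlineData(audioData, "audio/pcm") })

var outputText = ""
session.receive().collect {
    if (it.turnComplete) {
        session.stopReceiving()
    }
    outputText = outputText + it.text
}

// Output received from the server.
println(outputText)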
Java
ExecutorService executor = Executors.newFixedThreadPool(1);

// Initialize the Gemini Developer API backend service
// Create a `LiveModel` instance with a model that supports the Live API
LiveGenerativeModel lm = FirebaseAI.getInstance(GenerativeBackend.googleAI()).liveModel(
        "gemini-2.0-flash-live-preview-04-09",
        // Configure the model to respond with text
        new LiveGenerationConfig.Builder()
            .setResponseModalities(ResponseModality.TEXT)
            .build()
);
LiveModelFutures model = LiveModelFutures.from(lm);

ListenableFuture<LiveSession> sessionFuture = model.connect();

class LiveContentResponseSubscriber implements Subscriber<LiveContentResponse> {
    @Override
    public void onSubscribe(Subscription s) {
        s.request(Long.MAX_VALUE); // Request an unlimited number of items
    }

    @Override
    public void onNext(LiveContentResponse liveContentResponse) {
        // Handle the response from the server.
        System.out.println(liveContentResponse.getText());
    }

    @Override
    public void onError(Throwable t) {
        System.err.println("Error: " + t.getMessage());
    }

    @Override
    public void onComplete() {
        System.out.println("Done receiving messages!");
    }
}

Futures.addCallback(sessionFuture, new FutureCallback<LiveSession>() {
    @Override
    public void onSuccess(LiveSession ses) {
        LiveSessionFutures session = LiveSessionFutures.from(ses);

        // Send audio data
        session.send(new Content.Builder().addInlineData(audioData, "audio/pcm").build());

        Publisher<LiveContentResponse> publisher = session.receive();
        publisher.subscribe(new LiveContentResponseSubscriber());
    }

    @Override
    public void onFailure(Throwable t) {
        // Handle exceptions
    }
}, executor);
Web
// Initialize the Gemini Developer API backend service
const ai = getAI(firebaseApp, { backend: new GoogleAIBackend() });

// Create a `LiveGenerativeModel` instance with a model that supports the Live API
const model = getLiveGenerativeModel(ai, {
  model: "gemini-2.0-flash-live-preview-04-09",
  // Configure the model to respond with text
  generationConfig: {
    responseModalities: [ResponseModality.TEXT],
  },
});

const session = await model.connect();

// TODO(developer): Collect audio data (16-bit 16kHz PCM)
// const audioData = ...

// Send audio
const audioPart = {
  inlineData: { data: audioData, mimeType: "audio/pcm" },
};
session.send([audioPart]);

// Collect text from the model's turn
let text = "";

const messages = session.receive();
for await (const message of messages) {
  switch (message.type) {
    case "serverContent":
      if (message.turnComplete) {
        console.log(text);
      } else {
        const parts = message.modelTurn?.parts;
        if (parts) {
          text += parts.map((part) => part.text).join("");
        }
      }
      break;
    case "toolCall":
      // Ignore
    case "toolCallCancellation":
      // Ignore
  }
}
Dart
import 'package:firebase_ai/firebase_ai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';
import 'package:your_audio_recorder_package/your_audio_recorder_package.dart';
import 'dart:async';

late LiveModelSession _session;
final _audioRecorder = YourAudioRecorder();

Future<Stream<String>> audioToText() async {
  await Firebase.initializeApp(
    options: DefaultFirebaseOptions.currentPlatform,
  );

  // Initialize the Gemini Developer API backend service
  // Create a `LiveModel` instance with a model that supports the Live API
  final model = FirebaseAI.googleAI().liveModel(
    model: 'gemini-2.0-flash-live-preview-04-09',
    // Configure the model to respond with text
    config: LiveGenerationConfig(responseModalities: [ResponseModality.text]),
  );

  _session = await model.connect();

  final audioRecordStream = _audioRecorder.startRecordingStream();
  // Map the Uint8List stream to an InlineDataPart stream
  final mediaChunkStream = audioRecordStream.map((data) {
    return InlineDataPart('audio/pcm', data);
  });
  await _session.startMediaStream(mediaChunkStream);

  final responseStream = _session.receive();
  return responseStream.asyncMap((response) async {
    if (response.parts.isNotEmpty && response.parts.first.text != null) {
      return response.parts.first.text!;
    } else {
      throw Exception('Text response not found.');
    }
  });
}

Future<void> main() async {
  try {
    final textStream = await audioToText();
    await for (final text in textStream) {
      print('Received text: $text');
      // Handle the text response
    }
  } catch (e) {
    print('Error: $e');
  }
}
Unity
using Firebase;
using Firebase.AI;

async Task SendAudioReceiveText() {
  // Initialize the Gemini Developer API backend service
  // Create a `LiveModel` instance with a model that supports the Live API
  var model = FirebaseAI.GetInstance(FirebaseAI.Backend.GoogleAI()).GetLiveModel(
    modelName: "gemini-2.0-flash-live-preview-04-09",
    // Configure the model to respond with text
    liveGenerationConfig: new LiveGenerationConfig(
        responseModalities: new[] { ResponseModality.Text })
  );

  LiveSession session = await model.ConnectAsync();

  // Start a coroutine to send audio from the Microphone
  var recordingCoroutine = StartCoroutine(SendAudio(session));

  // Receive the response
  await foreach (var message in session.ReceiveAsync()) {
    // Process the received message
    if (!string.IsNullOrEmpty(message.Text)) {
      UnityEngine.Debug.Log("Received message: " + message.Text);
    }
  }

  StopCoroutine(recordingCoroutine);
}

IEnumerator SendAudio(LiveSession liveSession) {
  string microphoneDeviceName = null;
  int recordingFrequency = 16000;
  int recordingBufferSeconds = 2;

  var recordingClip = Microphone.Start(microphoneDeviceName, true,
      recordingBufferSeconds, recordingFrequency);

  int lastSamplePosition = 0;
  while (true) {
    if (!Microphone.IsRecording(microphoneDeviceName)) {
      yield break;
    }

    int currentSamplePosition = Microphone.GetPosition(microphoneDeviceName);

    if (currentSamplePosition != lastSamplePosition) {
      // The Microphone uses a circular buffer, so we need to check if the
      // current position wrapped around to the beginning, and handle it
      // accordingly.
      int sampleCount;
      if (currentSamplePosition > lastSamplePosition) {
        sampleCount = currentSamplePosition - lastSamplePosition;
      } else {
        sampleCount = recordingClip.samples - lastSamplePosition + currentSamplePosition;
      }

      if (sampleCount > 0) {
        // Get the audio chunk
        float[] samples = new float[sampleCount];
        recordingClip.GetData(samples, lastSamplePosition);

        // Send the data, discarding the resulting Task to avoid the warning
        _ = liveSession.SendAudioAsync(samples);

        lastSamplePosition = currentSamplePosition;
      }
    }

    // Wait for a short delay before reading the next sample from the Microphone
    const float MicrophoneReadDelay = 0.5f;
    yield return new WaitForSeconds(MicrophoneReadDelay);
  }
}