Excuse me — I’m fairly new and lack some context, but I adapted code from an example I found. In my app the user can talk, and the audio is then sent to Gemini for a live audio response.
To prevent the microphone input from capturing the audio output while it plays, I’m trying to use React Native WebRTC (is this the correct library?). However, my code causes the audio output to be low-volume and choppy whenever I use WebRTC. If I don’t use the React Native WebRTC service, the audio plays normally, but the problem of the input capturing the output remains.
Here is my React Native WebRTC service that I’m trying to use - WebRTCAudioService - Android Audio is very low and choppy whenever I use this service. · GitHub
- I’m probably not using this correctly? I just don’t want the audio output to be captured in the audio input.
- Is there a solution for this or another approach (maybe different technique or library?)
The issue is seen on an Android phone — my phone is an older model.
Code is here WebRTCAudioService - Android Audio is very low and choppy whenever I use this service. · GitHub
but pasting as well.
import { RTCPeerConnection, mediaDevices, MediaStream, MediaStreamTrack } from "react-native-webrtc"
import { RTCSessionDescriptionInit } from "react-native-webrtc/lib/typescript/RTCSessionDescription"
let webrtcInitialized: boolean = false
let localStream: MediaStream | null = null
let peerConnection: RTCPeerConnection | null = null
let processingStarted: boolean = false
const initialize = async (): Promise<boolean> => {
if (webrtcInitialized) {
return true
}
try {
localStream = await mediaDevices.getUserMedia({ audio: true, video: false })
await createLoopbackConnection()
webrtcInitialized = true
return true
} catch (error) {
console.error("WebRTCAudioService: Failed to initialize WebRTC audio processing:", error)
return false
}
}
const createLoopbackConnection = async (): Promise<void> => {
try {
const rtcConfig: RTCConfiguration = {
iceServers: [{ urls: "stun:stun.l.google.com:19302" }],
}
peerConnection = new RTCPeerConnection(rtcConfig)
if (localStream) {
localStream.getTracks().forEach((track: MediaStreamTrack) => {
peerConnection!.addTrack(track, localStream!)
})
const offer = await peerConnection.createOffer(undefined)
await peerConnection.setLocalDescription(offer)
await new Promise<void>((resolve) => {
if (peerConnection!.iceGatheringState === "complete") {
resolve()
} else {
const checkState = (): void => {
if (peerConnection!.iceGatheringState === "complete") {
peerConnection!.removeEventListener("icegatheringstatechange", checkState)
resolve()
}
}
peerConnection!.addEventListener("icegatheringstatechange", checkState)
setTimeout(resolve, 1000)
}
})
const currentLocalDescription = peerConnection.localDescription
if (!currentLocalDescription) {
throw new Error("No local description available")
}
const sdpLines = currentLocalDescription.sdp.split("\r\n")
const modifiedSdpLines = sdpLines.map((line: string) => {
if (line.startsWith("m=")) {
return line
}
if (line.includes("a=setup:")) {
return "a=setup:passive"
}
return line
})
const answer: RTCSessionDescriptionInit = {
type: "answer",
sdp: modifiedSdpLines.join("\r\n"),
}
try {
await peerConnection.setRemoteDescription(answer)
} catch (setRemoteError: any) {
console.error("WebRTCAudioService: Error setting remote description:", setRemoteError)
try {
const simpleSdp = sdpLines.filter((line) => !line.includes("a=setup:")).join("\r\n")
const simpleAnswer: RTCSessionDescriptionInit = {
type: "answer",
sdp: simpleSdp,
}
await peerConnection.setRemoteDescription(simpleAnswer)
} catch (err: any) {
console.error("WebRTCAudioService: Failed to set simplified remote description:", err)
throw new Error("Failed to create WebRTC loopback: " + err.message)
}
}
processingStarted = true
} else {
console.warn("WebRTCAudioService: No local stream available for loopback connection")
}
} catch (error) {
console.error("WebRTCAudioService: Error creating loopback connection:", error)
}
}
const startAudioProcessing = async (): Promise<boolean> => {
try {
if (!webrtcInitialized) {
const initResult = await initialize()
if (!initResult) {
return false
}
}
if (processingStarted) {
return true
}
if (!peerConnection || peerConnection.connectionState === "closed") {
await createLoopbackConnection()
}
return true
} catch (error) {
console.error("WebRTCAudioService: Error starting audio processing:", error)
return false
}
}
const stopAudioProcessing = async (): Promise<void> => {
try {
if (peerConnection) {
peerConnection.close()
peerConnection = null
}
if (localStream) {
localStream.getTracks().forEach((track) => track.stop())
localStream = null
}
processingStarted = false
webrtcInitialized = false
} catch (error) {
console.error("WebRTCAudioService: Error stopping audio processing:", error)
}
}
const isProcessingActive = (): boolean => {
return processingStarted && webrtcInitialized
}
export default {
initialize,
startAudioProcessing,
stopAudioProcessing,
isProcessingActive,
}