import { useEffect, useRef, useState, useCallback } from "react"
import {
mediaDevices,
RTCPeerConnection,
RTCIceCandidate,
RTCSessionDescription,
MediaStream,
} from "react-native-webrtc"
import InCallManager from "react-native-incall-manager"
import { Platform } from "react-native"
const defaultConfig = {
iceServers: [
{ urls: "stun:stun.l.google.com:19302" },
{ urls: "stun:stun1.l.google.com:19302" },
{ urls: "stun:stun2.l.google.com:19302" },
{
urls: "turn:url",
username: "myuser",
credential: "mypassword",
},
],
}
export function useWebRTC(config = defaultConfig) {
const peerConnection = useRef(null)
const remoteCandidates = useRef([])
const [localStream, setLocalStream] = useState(null)
const [isVoiceOnly, setIsVoiceOnly] = useState(false)
const [remoteStream, setRemoteStream] = useState(null)
const [connectionState, setConnectionState] = useState("")
const [isInitializing, setIsInitializing] = useState(false)
const [audioEnabled, setAudioEnabled] = useState(true)
const [isAudioSessionActive, setIsAudioSessionActive] = useState(false)
const onIceCandidateRef = useRef(null)
const onRemoteStreamRef = useRef(null)
const onConnectionStateChangeRef = useRef(null)
useEffect(() => {
const initializeAudio = async () => {
try {
InCallManager.stop()
InCallManager.start({ media: "audio", auto: true, ringback: false })
InCallManager.setKeepScreenOn(true)
InCallManager.setForceSpeakerphoneOn(false)
InCallManager.setSpeakerphoneOn(false)
setIsAudioSessionActive(true)
} catch (error) {
console.error("❌ InCallManager init error:", error)
setIsAudioSessionActive(false)
}
}
initializeAudio()
return () => {
try {
InCallManager.stop()
setIsAudioSessionActive(false)
} catch (error) {
console.error("❌ InCallManager stop error:", error)
}
}
}, [])
// Deduplicate concurrent getUserMedia requests with a ref holding the in-flight
// promise. Polling isInitializing in a loop does not work because the callback
// only ever sees the state values captured when it was created.
const pendingStreamRequest = useRef(null)
const fetchLocalStream = useCallback(async (forceNew = false) => {
if (localStream && !forceNew) return localStream
if (pendingStreamRequest.current && !forceNew) return pendingStreamRequest.current
setIsInitializing(true)
const request = (async () => {
try {
const constraints = {
audio: true,
video: {
frameRate: 30,
facingMode: 'user',
},
}
const stream = await mediaDevices.getUserMedia(constraints)
if (!stream.getAudioTracks().length) throw new Error("No audio tracks")
setLocalStream(stream)
return stream
} catch (error) {
console.error("❌ Error fetching local stream:", error)
throw error
} finally {
setIsInitializing(false)
pendingStreamRequest.current = null
}
})()
pendingStreamRequest.current = request
return request
}, [localStream])
const initializePeerConnection = useCallback(() => {
if (peerConnection.current) return peerConnection.current
const pc = new RTCPeerConnection(config)
peerConnection.current = pc
pc.addEventListener('icecandidate', event => {
if (!event.candidate) return
console.log("Ice Candidate", event.candidate)
if (onIceCandidateRef.current) {
onIceCandidateRef.current(event.candidate)
}
})
pc.addEventListener('icecandidateerror', event => {
console.log("Ice candidate errors,", event)
})
pc.ontrack = (event) => {
console.log("🎧 Got remote track", event)
let remote = event.streams?.[0] || new MediaStream()
if (!event.streams?.length && event.track) {
remote.addTrack(event.track)
}
setRemoteStream(remote)
onRemoteStreamRef.current?.(remote)
setTimeout(() => {
try {
InCallManager.setKeepScreenOn(true)
InCallManager.setSpeakerphoneOn(false)
} catch (err) {
console.error("❌ set speaker error:", err)
}
}, 500)
}
pc.addEventListener('iceconnectionstatechange', event => {
switch (pc.iceConnectionState) {
case 'connected':
case 'completed':
console.log("✅ ICE Connected - Call is active")
// Default to the earpiece once media is flowing; the UI can still switch to speaker.
setTimeout(() => InCallManager.setSpeakerphoneOn(false), 1000)
break
case 'disconnected':
case 'failed':
case 'closed':
console.warn("⚠️ ICE Disconnected/Failed/Closed")
break
}
onConnectionStateChangeRef.current?.(pc.iceConnectionState)
})
pc.addEventListener('signalingstatechange', () => {
switch (pc.signalingState) {
case 'closed':
console.log('📴 Signaling state is closed – call has ended.')
break
}
})
return pc
}, [config])
const handleRemoteCandidate = useCallback((iceCandidate) => {
const candidate = new RTCIceCandidate(iceCandidate)
if (!peerConnection.current?.remoteDescription?.type) {
console.log("Remote description not set yet, queuing ICE candidate")
return remoteCandidates.current.push(candidate)
}
return peerConnection.current.addIceCandidate(candidate)
}, [])
const processCandidates = useCallback(() => {
if (!remoteCandidates.current.length) return
console.log("Processing queued remote ICE candidates")
remoteCandidates.current.forEach((candidate) => {
peerConnection.current?.addIceCandidate(candidate)
})
remoteCandidates.current = []
}, [])
const createOffer = useCallback(async () => {
const stream = await fetchLocalStream(true)
const pc = initializePeerConnection()
const existingSenders = pc.getSenders().map(s => s.track?.id)
stream.getTracks().forEach((track) => {
if (!existingSenders.includes(track.id)) {
pc.addTrack(track, stream)
console.log("Track added to PeerConnection:", track.kind, track.id)
}
})
const sessionConstraints = {
OfferToReceiveAudio: true,
OfferToReceiveVideo: true,
VoiceActivityDetection: true,
}
try {
const offerDescription = await pc.createOffer(sessionConstraints)
console.log("Offer SDP:", offerDescription.sdp)
await pc.setLocalDescription(offerDescription)
return offerDescription
} catch (err) {
console.error("❌ Error creating offer:", err)
throw err
}
}, [fetchLocalStream, initializePeerConnection])
const createAnswer = useCallback(async (offer) => {
try {
console.log("Received offer SDP:", offer?.sdp?.slice(0, 60) + "...")
// 🔧 Initialize the peer connection
const pc = initializePeerConnection()
// 🎙️ Fetch and attach local media stream
const stream = await fetchLocalStream(true)
stream.getTracks().forEach((track) => {
pc.addTrack(track, stream)
console.log("🎙️ Local track added:", track.kind, track.id)
})
// 🧠 Set the remote offer before creating the answer
const remoteDesc = new RTCSessionDescription(offer)
await pc.setRemoteDescription(remoteDesc)
console.log("✅ Remote description set.")
// 📤 Create and set local answer SDP
const answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
console.log("📤 Created and set local SDP answer")
// 🔁 Apply any queued remote ICE candidates
processCandidates()
// 🧪 Optional: Log current connection state
console.log("🛰️ Current ICE Connection State:", pc.iceConnectionState)
return answer
} catch (err) {
console.error("❌ Error during createAnswer:", err)
throw err
}
}, [fetchLocalStream, initializePeerConnection, processCandidates])
const setRemoteDescription = useCallback(async (desc) => {
const pc = initializePeerConnection()
await pc.setRemoteDescription(new RTCSessionDescription(desc))
// Flush any ICE candidates that were queued before the remote description arrived
processCandidates()
}, [initializePeerConnection, processCandidates])
const addIceCandidate = useCallback(async (candidate) => {
try {
handleRemoteCandidate(candidate)
} catch (err) {
console.error("❌ ICE error:", err)
}
}, [handleRemoteCandidate])
const toggleAudio = useCallback(() => {
if (localStream) {
localStream.getAudioTracks().forEach((t) => (t.enabled = !t.enabled))
setAudioEnabled((prev) => !prev)
}
}, [localStream])
// Explicitly route audio to the speaker (on = true) or the earpiece (on = false);
// returns true on success so callers can keep their own speaker state in sync.
const toggleSpeaker = useCallback((on) => {
try {
InCallManager.setSpeakerphoneOn(on)
return true
} catch (err) {
console.error("❌ Speaker toggle error:", err)
return false
}
}, [])
const forceAudioPlayback = useCallback(() => {
try {
InCallManager.setSpeakerphoneOn(true)
setTimeout(() => InCallManager.setSpeakerphoneOn(false), 1000)
} catch (err) {
console.error("❌ forceAudioPlayback error:", err)
}
}, [])
const endCall = useCallback(() => {
if (localStream) {
localStream.getTracks().forEach((t) => t.stop())
setLocalStream(null)
}
if (peerConnection.current) {
peerConnection.current.close()
peerConnection.current = null
}
setRemoteStream(null)
setConnectionState("")
InCallManager.stop()
setIsAudioSessionActive(false)
}, [localStream])
const testRemoteAudioTrack = async () => {
const pc = peerConnection.current
if (!pc || !remoteStream) {
console.warn("No peer connection or remote stream")
return
}
const audioTracks = remoteStream.getAudioTracks()
if (!audioTracks.length) {
console.warn("No audio tracks in remote stream")
return
}
const track = audioTracks[0]
console.log("Remote audio track details:", {
id: track.id,
kind: track.kind,
enabled: track.enabled,
readyState: track.readyState,
muted: track.muted,
})
const stats = await pc.getStats()
let inboundFound = false
stats.forEach((report) => {
if (report.type === "inbound-rtp" && report.kind === "audio") {
inboundFound = true
console.log("Inbound audio stats:", {
packetsReceived: report.packetsReceived,
packetsLost: report.packetsLost,
jitter: report.jitter,
audioLevel: report.audioLevel,
codecId: report.codecId,
})
}
if (report.type === "track" ) {
console.log("🎛️ Remote track report:", {
trackIdentifier: report.trackIdentifier,
remoteSource: report.remoteSource,
ended: report.ended,
framesReceived: report.framesReceived,
})
}
})
if (!inboundFound) {
console.warn("No inbound audio stats - likely not receiving any packets")
}
}
return {
localStream,
remoteStream,
connectionState,
audioEnabled,
isAudioSessionActive,
createOffer,
createAnswer,
setRemoteDescription,
addIceCandidate,
endCall,
fetchLocalStream,
toggleAudio,
toggleSpeaker,
forceAudioPlayback,
getPeerConnection: () => peerConnection.current,
setOnIceCandidate: (cb) => (onIceCandidateRef.current = cb),
setOnRemoteStream: (cb) => (onRemoteStreamRef.current = cb),
setOnConnectionStateChange: (cb) => (onConnectionStateChangeRef.current = cb),
setRemoteStream,
testRemoteAudioTrack,
}
}
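For orientation, here is a minimal sketch of the handshake order the hook expects: register the ICE callback before creating the offer, apply the answer on the caller side, and let createAnswer handle the callee side. The `signaling` object and the `useCallSignaling` name are placeholders for your own transport, not part of the hook:

import { useEffect } from "react"
import { useWebRTC } from "./useWebRTC" // path is an assumption

// Sketch only: `signaling` stands in for whatever transport the app uses.
export function useCallSignaling(signaling, isCaller, incomingOffer) {
  const {
    createOffer,
    createAnswer,
    setRemoteDescription,
    addIceCandidate,
    setOnIceCandidate,
  } = useWebRTC()

  useEffect(() => {
    // Register the ICE callback before creating the offer so no candidates are dropped.
    setOnIceCandidate((candidate) => signaling.emit("ice", { candidate }))
    signaling.on("ice", ({ candidate }) => addIceCandidate(candidate))

    const run = async () => {
      if (isCaller) {
        const offer = await createOffer()
        signaling.emit("offer", { offer })
        // Applying the answer also flushes any ICE candidates queued before it arrived.
        signaling.on("answer", ({ answer }) => setRemoteDescription(answer))
      } else {
        // createAnswer() applies the remote offer, attaches local tracks,
        // and returns the answer SDP to send back to the caller.
        const answer = await createAnswer(incomingOffer)
        signaling.emit("answer", { answer })
      }
    }
    run()
  }, [])
}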
My call screen:
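For completeness, these are the imports the component below relies on in the code shown; the local module paths (the hook and the service singletons) are assumptions based on how they are used:

import { useState, useEffect, useRef } from "react"
import { Animated, Vibration, Alert } from "react-native"
import InCallManager from "react-native-incall-manager"
// Local modules: the paths below are placeholders, adjust to your project layout.
import { useWebRTC } from "../hooks/useWebRTC"
import socketService from "../services/socketService"
import AudioManager from "../services/AudioManager"
import PermissionManager from "../services/PermissionManager"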
const VoiceCallScreen = ({ route, navigation }) => {
const { contactName, socketId, contactImage, isIncoming, offer, callType, userId } = route.params
const [callStatus, setCallStatus] = useState(isIncoming ? "incoming" : "connecting")
const [callDuration, setCallDuration] = useState(0)
const [isCallConnected, setIsCallConnected] = useState(false)
const [isSpeakerOn, setIsSpeakerOn] = useState(false)
const {
localStream,
remoteStream,
connectionState,
audioEnabled,
isAudioSessionActive,
createOffer,
createAnswer,
setRemoteDescription,
addIceCandidate,
endCall,
fetchLocalStream,
toggleAudio,
toggleSpeaker,
forceAudioPlayback,
getPeerConnection,
setOnIceCandidate,
setOnRemoteStream,
setOnConnectionStateChange,
setRemoteStream,
testRemoteAudioTrack
} = useWebRTC()
const timerRef = useRef(null)
// Keep the Animated.Value in a ref so it is not recreated on every render
const pulseAnim = useRef(new Animated.Value(1)).current
const audioTestRef = useRef(null)
const [re, setre] = useState(null) // local copy of the remote stream used for rendering
// Handle incoming call effects
useEffect(() => {
if (isIncoming && !isCallConnected) {
Vibration.vibrate([500, 500, 500], true)
}
return () => {
Vibration.cancel()
}
}, [isIncoming, isCallConnected])
// Enhanced permission request using the PermissionManager
const requestPermissions = async () => {
try {
console.log("Requesting permissions for voice call...")
const granted = await PermissionManager.ensurePermissions(callType)
if (!granted) {
Alert.alert(
"Permissions Required",
"Voice call permissions are required to continue. Please enable them in your device settings.",
[
{ text: "Cancel", style: "cancel" },
{ text: "Settings", onPress: () => PermissionManager.openSettings() },
],
)
}
return granted
} catch (error) {
console.error("❌ Permission request error:", error)
Alert.alert("Permission Error", "Unable to request permissions. Please check your device settings.")
return false
}
}
const startOutgoingCall = async () => {
try {
console.log("Starting outgoing call...")
await AudioManager.startCallAudio()
// 🎙️ Ensure stream is ready
const stream = await fetchLocalStream(true)
stream.getAudioTracks().forEach((track) => {
track.enabled = true
console.log("🎙️ Local track enabled:", track)
})
// 🧠 createOffer should internally:
// - call initializePeerConnection
// - add track to pc
const offerData = await createOffer()
// ✅ Confirm PeerConnection state
const pc = getPeerConnection()
const senders = pc?.getSenders() || []
console.log("📤 PeerConnection senders:", senders)
// 📤 Send offer to peer
socketService.callUser({
offer: offerData,
to: userId,
callType: callType,
})
} catch (err) {
console.error("❌ Call Error:", err)
Alert.alert("Call Failed", "Unable to start the call. Please try again.")
navigation.goBack()
}
}
const handleAnswer = async (answerData) => {
try {
console.log("Handling call answer...")
await setRemoteDescription(answerData)
console.log("Remote description set for answer")
// Force audio playback after answer is processed
setTimeout(() => {
forceAudioPlayback()
}, 2000)
} catch (error) {
console.error("❌ Error handling answer:", error)
Alert.alert("Call Error", "Failed to process the call answer.")
}
}
const setupSocketListeners = () => {
socketService.onIceCandidate(async (data) => {
console.log("Received ICE candidate from peer")
if (data.candidate) {
await addIceCandidate(data.candidate)
}
})
socketService.onCallAnswered(async (data) => {
console.log("Call answered by peer")
if (data.answer) {
try {
// Set the remote SDP on caller side
await setRemoteDescription(data.answer)
// ✅ Update call status
setCallStatus("connected")
setIsCallConnected(true)
// 📳 Stop vibration if ringing
Vibration.cancel()
console.log("✅ Remote description set from answer")
} catch (err) {
console.error("❌ Error applying remote answer:", err)
}
} else {
console.warn("Answer payload missing in onCallAnswered")
}
})
socketService.onCallRejected(() => {
console.log("❌ Call rejected by peer")
setCallStatus("rejected")
Vibration.cancel()
setTimeout(() => navigation.goBack(), 2000)
})
socketService.onCallEnded(() => {
console.log("📞 Call ended by peer")
setCallStatus("ended")
Vibration.cancel()
setTimeout(() => navigation.goBack(), 2000)
})
socketService.onCallFailed(() => {
console.log("❌ Call failed")
setCallStatus("failed")
Vibration.cancel()
Alert.alert("Call Failed", "The call could not be completed.")
setTimeout(() => navigation.goBack(), 2000)
})
}
const setupWebRTCCallbacks = () => {
setOnIceCandidate((candidate) => {
console.log("Sending ICE candidate to peer")
socketService.sendIceCandidate({
candidate,
to: socketId,
})
})
setOnRemoteStream((stream) => {
console.log("🎧 Remote stream received in callback", stream)
// Force audio playback when remote stream is received
setRemoteStream(stream)
setre(stream)
InCallManager.setSpeakerphoneOn(true)
})
setOnConnectionStateChange((state) => {
console.log("🔗 Connection state changed:", state)
if (state === "connected") {
setCallStatus("connected")
setIsCallConnected(true)
// Force audio playback when connection is established
setTimeout(() => {
forceAudioPlayback()
}, 1500)
} else if (state === "failed" || state === "disconnected") {
setCallStatus("ended")
setTimeout(() => navigation.goBack(), 2000)
}
})
}
const cleanup = async () => {
clearInterval(timerRef.current)
clearInterval(audioTestRef.current)
endCall()
Vibration.cancel()
try {
await AudioManager.stopCallAudio()
} catch (error) {
console.error("❌ Error stopping audio manager:", error)
}
}
// Audio debugging and testing
useEffect(() => {
if (remoteStream && isCallConnected) {
console.log("Starting audio debugging...")
// Periodic audio check
audioTestRef.current = setInterval(() => {
const audioTracks = remoteStream.getAudioTracks()
console.log("🎧 Audio tracks status:", {
count: audioTracks.length,
enabled: audioTracks.map((t) => t.enabled),
readyState: audioTracks.map((t) => t.readyState),
audioSessionActive: isAudioSessionActive,
})
// Try to force audio playback periodically
if (audioTracks.length > 0 && audioTracks[0].enabled) {
forceAudioPlayback()
}
}, 5000)
}
return () => {
if (audioTestRef.current) {
clearInterval(audioTestRef.current)
}
}
}, [remoteStream, isCallConnected, isAudioSessionActive, forceAudioPlayback])
useEffect(() => {
const init = async () => {
const pulse = () => {
Animated.sequence([
Animated.timing(pulseAnim, {
toValue: 1.2,
duration: 1000,
useNativeDriver: true,
}),
Animated.timing(pulseAnim, {
toValue: 1,
duration: 1000,
useNativeDriver: true,
}),
]).start(() => {
if (callStatus === "connecting" || callStatus === "ringing") {
pulse()
}
})
}
if (callStatus === "connecting" || callStatus === "ringing") {
pulse()
}
const hasPermissions = await requestPermissions()
if (!hasPermissions) {
navigation.goBack()
return
}
try {
await AudioManager.initialize()
} catch (error) {
console.error("❌ Failed to initialize audio manager:", error)
}
setupSocketListeners()
setupWebRTCCallbacks()
if (!isIncoming) {
setTimeout(() => {
setCallStatus("ringing")
startOutgoingCall()
}, 1000)
}
}
init()
return () => {
cleanup()
}
}, [])
useEffect(() => {
let interval
if (callStatus === "connected") {
interval = setInterval(() => {
setCallDuration((prev) => prev + 1)
}, 1000)
}
return () => clearInterval(interval)
}, [callStatus])
const formatDuration = (seconds) => {
const mins = Math.floor(seconds / 60)
const secs = seconds % 60
return `${mins.toString().padStart(2, "0")}:${secs.toString().padStart(2, "0")}`
}
const handleAccept = async () => {
try {
console.log("Accepting call...")
setCallStatus("connected")
setIsCallConnected(true)
Vibration.cancel()
// 🔊 Start audio environment
await AudioManager.startCallAudio()
// 🎙️ Ensure mic is available
await fetchLocalStream(true)
// 📥 Create and send SDP answer
const answer = await createAnswer(offer)
// 📡 Signal answer back to caller
socketService.answerCall({
answer: answer,
to: socketId, // caller's socket ID
})
console.log("✅ Call accepted and answer sent")
// 🧪 Force speaker audio (helpful for iOS sometimes)
setTimeout(() => {
forceAudioPlayback()
}, 2000)
} catch (error) {
console.error("❌ Error accepting call:", error)
Alert.alert("Error", "Failed to accept the call. Please try again.")
}
}
const handleReject = () => {
console.log("Rejecting call...")
setCallStatus("rejected")
Vibration.cancel()
socketService.rejectCall({ to: socketId })
setTimeout(() => navigation.goBack(), 1000)
}
const handleEndCall = async () => {
console.log("Ending call...")
setCallStatus("ended")
socketService.endCall({ to: socketId })
endCall()
await AudioManager.stopCallAudio()
navigation.goBack()
}
const handleToggleSpeaker = () => {
const next = !isSpeakerOn
if (toggleSpeaker(next)) { // ← from useWebRTC; returns false if InCallManager throws
setIsSpeakerOn(next)
}
}
// Add debug button for testing audio
const handleTestAudio = () => {
console.log("Testing audio playback...")
forceAudioPlayback()
Alert.alert("Audio Test", "Attempting to force audio playback. Check console for details.")
}
const getStatusText = () => {
switch (callStatus) {
case "incoming":
return "Incoming call..."
case "connecting":
return "Connecting..."
case "ringing":
return "Ringing..."
case "connected":
return formatDuration(callDuration)
case "rejected":
return "Call rejected"
case "ended":
return "Call ended"
case "failed":
return "Call failed"
default:
return ""
}
}