// FREEBIE
//
//  Copyright (c) 2017 Open Whisper Systems. All rights reserved.
//

import Foundation
import WebRTC
/**
 * By default WebRTC starts the audio session (PlayAndRecord) immediately upon creating the peer connection
 * but we want to create the peer connection and set up all the signaling channels before we prompt the user
 * for an incoming call. Without manually handling the session, this would result in the user seeing a recording
 * permission request (and recording banner) before they even know they have an incoming call.
 *
 * By using the `useManualAudio` and `isAudioEnabled` attributes of the RTCAudioSession we can delay recording until
 * it makes sense.
 */
class CallAudioSession {

    let TAG = "[CallAudioSession]"

    /// WebRTC's shared wrapper around AVAudioSession; all manual-audio
    /// control goes through this single instance.
    private let rtcAudioSession = RTCAudioSession.sharedInstance()

    // Force singleton access — call audio must be coordinated through
    // exactly one session manager.
    static let shared = CallAudioSession()
    private init() {}

    /// Switches the RTCAudioSession into manual-audio mode.
    ///
    /// Must run before any audio tracks are added to the peer connection;
    /// otherwise WebRTC would begin recording before our signaling is set up.
    func configure() {
        Logger.info("\(TAG) in \(#function)")
        rtcAudioSession.useManualAudio = true
    }

    /// Because we use `useManualAudio` on our RTCAudioSession, starting and
    /// stopping the recording audio session is our responsibility — this
    /// property forwards directly to `RTCAudioSession.isAudioEnabled`.
    /// See the header comment for details on manual audio.
    var isRTCAudioEnabled: Bool {
        get { return rtcAudioSession.isAudioEnabled }
        set { rtcAudioSession.isAudioEnabled = newValue }
    }
}