// session-ios/SignalMessaging/environment/OWSAudioSession.swift
//
// Copyright (c) 2018 Open Whisper Systems. All rights reserved.
//
import Foundation
import WebRTC

@objc(OWSAudioActivity)
public class AudioActivity: NSObject {

    let audioDescription: String

    let behavior: OWSAudioBehavior

    @objc
    public init(audioDescription: String, behavior: OWSAudioBehavior) {
        self.audioDescription = audioDescription
        self.behavior = behavior
    }
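
    // If the owner of an activity is deallocated without explicitly ending it,
    // give the audio session a chance to deactivate (stale activities are
    // culled in ensureAudioSessionActivationState).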
    deinit {
        audioSession.ensureAudioSessionActivationStateAfterDelay()
    }

    // MARK: Dependencies

    var audioSession: OWSAudioSession {
        return Environment.shared.audioSession
    }

    // MARK:

    override public var description: String {
        return "<\(self.logTag) audioDescription: \"\(audioDescription)\">"
    }
}

@objc
public class OWSAudioSession: NSObject {

    @objc
    public func setup() {
        // Re-evaluate the audio category whenever the proximity sensor state changes
        // (see proximitySensorStateDidChange and ensureAudioCategory below).
        NotificationCenter.default.addObserver(self, selector: #selector(proximitySensorStateDidChange(notification:)), name: .UIDeviceProximityStateDidChange, object: nil)
    }

    // MARK: Dependencies

    var proximityMonitoringManager: OWSProximityMonitoringManager {
        return Environment.shared.proximityMonitoringManager
    }

    private let avAudioSession = AVAudioSession.sharedInstance()

    private let device = UIDevice.current

    // MARK:

    private var currentActivities: [Weak<AudioActivity>] = []

    // The set of behaviors requested by all still-living audio activities.
    var aggregateBehaviors: Set<OWSAudioBehavior> {
        return Set(self.currentActivities.compactMap { $0.value?.behavior })
    }

    @objc
    public func startAudioActivity(_ audioActivity: AudioActivity) -> Bool {
        Logger.debug("with \(audioActivity)")

        objc_sync_enter(self)
        defer { objc_sync_exit(self) }

        self.currentActivities.append(Weak(value: audioActivity))

        do {
            try ensureAudioCategory()
            return true
        } catch {
            owsFailDebug("failed with error: \(error)")
            return false
        }
    }

    @objc
    public func endAudioActivity(_ audioActivity: AudioActivity) {
        Logger.debug("with audioActivity: \(audioActivity)")

        objc_sync_enter(self)
        defer { objc_sync_exit(self) }

        currentActivities = currentActivities.filter { return $0.value != audioActivity }
        do {
            try ensureAudioCategory()
        } catch {
            owsFailDebug("error in ensureAudioCategory: \(error)")
        }
    }
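
    // Illustrative usage sketch (the call site and the "voice note" description
    // are assumptions, not part of this file): a caller holds a strong reference
    // to its AudioActivity for as long as it needs the session, then ends it
    // explicitly.
    //
    //     let activity = AudioActivity(audioDescription: "voice note", behavior: .audioMessagePlayback)
    //     guard Environment.shared.audioSession.startAudioActivity(activity) else { return }
    //     // ... play the audio message ...
    //     Environment.shared.audioSession.endAudioActivity(activity)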

    // Picks the AVAudioSession category for the highest-priority behavior among
    // the current activities (call > playAndRecord > audioMessagePlayback > playback),
    // and schedules deactivation when no activities remain.
    func ensureAudioCategory() throws {
        if aggregateBehaviors.contains(.audioMessagePlayback) {
            self.proximityMonitoringManager.add(lifetime: self)
        } else {
            self.proximityMonitoringManager.remove(lifetime: self)
        }

        if aggregateBehaviors.contains(.call) {
            // Do nothing while on a call.
            // WebRTC/CallAudioService manages call audio.
            // Eventually it would be nice to consolidate more of the audio
            // session handling.
        } else if aggregateBehaviors.contains(.playAndRecord) {
            assert(avAudioSession.recordPermission() == .granted)
            try avAudioSession.setCategory(AVAudioSessionCategoryRecord)
        } else if aggregateBehaviors.contains(.audioMessagePlayback) {
            if self.device.proximityState {
                Logger.debug("proximityState: true")

                try avAudioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
                try avAudioSession.overrideOutputAudioPort(.none)
            } else {
                Logger.debug("proximityState: false")
                try avAudioSession.setCategory(AVAudioSessionCategoryPlayback)
            }
        } else if aggregateBehaviors.contains(.playback) {
            try avAudioSession.setCategory(AVAudioSessionCategoryPlayback)
        } else {
            ensureAudioSessionActivationStateAfterDelay()
        }
    }

    @objc
    func proximitySensorStateDidChange(notification: Notification) {
        do {
            try ensureAudioCategory()
        } catch {
            owsFailDebug("error in response to proximity change: \(error)")
        }
    }

    fileprivate func ensureAudioSessionActivationStateAfterDelay() {
        // Without this delay, we sometimes error when deactivating the audio session with:
        //     Error Domain=NSOSStatusErrorDomain Code=560030580 "The operation couldn't be completed." (OSStatus error 560030580.)
        // aka "AVAudioSessionErrorCodeIsBusy"
        DispatchQueue.global().asyncAfter(deadline: .now() + 0.5) {
            self.ensureAudioSessionActivationState()
        }
    }

    private func ensureAudioSessionActivationState() {
        objc_sync_enter(self)
        defer { objc_sync_exit(self) }

        // Cull any stale activities
        currentActivities = currentActivities.compactMap { oldActivity in
            guard oldActivity.value != nil else {
                // Normally we should be explicitly stopping an audio activity, but this allows
                // for recovery if the owner of the AudioActivity was deallocated without ending
                // its audio activity.
                Logger.warn("an old activity has been gc'd")
                return nil
            }

            // return any still-active activities
            return oldActivity
        }

        guard currentActivities.isEmpty else {
            Logger.debug("not deactivating due to currentActivities: \(currentActivities)")
            return
        }

        do {
            // When playing audio in Signal, other apps' audio (e.g. Music) is paused.
            // By notifying when we deactivate, the other app can resume playback.
            try avAudioSession.setActive(false, with: [.notifyOthersOnDeactivation])
        } catch {
            owsFailDebug("failed with error: \(error)")
        }
    }

    // MARK: - WebRTC Audio

    /**
     * By default, WebRTC starts the audio session (PlayAndRecord) immediately upon creating the peer connection,
     * but we want to create the peer connection and set up all the signaling channels before we prompt the user
     * for an incoming call. Without manually handling the session, this would result in the user seeing a recording
     * permission request (and recording banner) before they even know they have an incoming call.
     *
     * By using the `useManualAudio` and `isAudioEnabled` attributes of the RTCAudioSession we can delay recording until
     * it makes sense.
     */

    /**
     * The private class that manages AVAudioSession for WebRTC
     */
    private let rtcAudioSession = RTCAudioSession.sharedInstance()

    /**
     * This must be called before any audio tracks are added to the peerConnection, else we'll start recording before all
     * our signaling is set up.
     */
    @objc
    public func configureRTCAudio() {
        Logger.info("")
        rtcAudioSession.useManualAudio = true
    }

    /**
     * Because we useManualAudio with our RTCAudioSession, we have to start/stop the recording audio session ourselves.
     * See header for details on manual audio.
     */
    @objc
    public var isRTCAudioEnabled: Bool {
        get {
            return rtcAudioSession.isAudioEnabled
        }
        set {
            rtcAudioSession.isAudioEnabled = newValue
        }
    }
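
    // Illustrative call-flow sketch (an assumption about how calling code uses
    // this API; the ordering below is inferred from the comments above, not
    // taken from this file):
    //
    //     // Before creating the peer connection:
    //     Environment.shared.audioSession.configureRTCAudio()
    //     // Once call audio should actually start flowing:
    //     Environment.shared.audioSession.isRTCAudioEnabled = true
    //     // When the call ends:
    //     Environment.shared.audioSession.isRTCAudioEnabled = false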
}