diff --git a/Wave/AppState.swift b/Wave/AppState.swift
index 4cc8c81..555e034 100644
--- a/Wave/AppState.swift
+++ b/Wave/AppState.swift
@@ -36,6 +36,9 @@ final class AppState {
     var includePunctuation: Bool {
         didSet { UserDefaults.standard.set(includePunctuation, forKey: "includePunctuation") }
     }
+    var muteSystemAudio: Bool {
+        didSet { UserDefaults.standard.set(muteSystemAudio, forKey: "muteSystemAudio") }
+    }
 
     // MARK: - Services
     let modelManager = ModelManager()
@@ -66,6 +69,7 @@ final class AppState {
         } else {
             includePunctuation = UserDefaults.standard.bool(forKey: "includePunctuation")
         }
+        muteSystemAudio = UserDefaults.standard.bool(forKey: "muteSystemAudio")
 
         // Default shortcut: Control + Space
         if hotkeyKeyCode == 0 && hotkeyModifiers == 0 {
@@ -130,8 +134,10 @@ final class AppState {
         do {
             status = .recording
             showOverlay()
+            if muteSystemAudio { SystemAudioDucker.duck() }
             try await transcriptionService.startRecording()
         } catch {
+            if muteSystemAudio { SystemAudioDucker.restore() }
             status = .error("Recording failed")
             hideOverlay()
             try? await Task.sleep(for: .seconds(2))
@@ -144,6 +150,7 @@ final class AppState {
         updateOverlay()
 
         let text = await transcriptionService.stopRecordingAndTranscribe(includePunctuation: includePunctuation)
+        if muteSystemAudio { SystemAudioDucker.restore() }
 
         hideOverlay()
 
diff --git a/Wave/Utilities/SystemAudioDucker.swift b/Wave/Utilities/SystemAudioDucker.swift
new file mode 100644
index 0000000..14e775a
--- /dev/null
+++ b/Wave/Utilities/SystemAudioDucker.swift
@@ -0,0 +1,60 @@
+import CoreAudio
+
+/// Saves and restores the default output device's mute state around a dictation session.
+/// Uses CoreAudio's `kAudioDevicePropertyMute` rather than changing volume so the
+/// user's volume setting is never touched.
+enum SystemAudioDucker {
+    private static var savedMuteState: Bool = false
+
+    /// Snapshot the current mute state, then mute system output.
+    static func duck() {
+        savedMuteState = isMuted()
+        setMuted(true)
+    }
+
+    /// Restore the mute state captured at the last `duck()` call.
+    static func restore() {
+        setMuted(savedMuteState)
+    }
+
+    // MARK: - Private
+
+    private static func isMuted() -> Bool {
+        guard let device = defaultOutputDevice() else { return false }
+        var mute: UInt32 = 0
+        var size = UInt32(MemoryLayout<UInt32>.size)
+        var address = mutePropertyAddress()
+        let status = AudioObjectGetPropertyData(device, &address, 0, nil, &size, &mute)
+        return status == noErr && mute != 0
+    }
+
+    private static func setMuted(_ muted: Bool) {
+        guard let device = defaultOutputDevice() else { return }
+        var mute = UInt32(muted ? 1 : 0)
+        var address = mutePropertyAddress()
+        // Silently ignore devices that don't support the mute property (e.g. some BT sinks)
+        AudioObjectSetPropertyData(device, &address, 0, nil, UInt32(MemoryLayout<UInt32>.size), &mute)
+    }
+
+    private static func defaultOutputDevice() -> AudioDeviceID? {
+        var device = AudioDeviceID(kAudioObjectUnknown)
+        var size = UInt32(MemoryLayout<AudioDeviceID>.size)
+        var address = AudioObjectPropertyAddress(
+            mSelector: kAudioHardwarePropertyDefaultOutputDevice,
+            mScope: kAudioObjectPropertyScopeGlobal,
+            mElement: kAudioObjectPropertyElementMain
+        )
+        let status = AudioObjectGetPropertyData(
+            AudioObjectID(kAudioObjectSystemObject), &address, 0, nil, &size, &device
+        )
+        return (status == noErr && device != kAudioObjectUnknown) ? device : nil
+    }
+
+    private static func mutePropertyAddress() -> AudioObjectPropertyAddress {
+        AudioObjectPropertyAddress(
+            mSelector: kAudioDevicePropertyMute,
+            mScope: kAudioDevicePropertyScopeOutput,
+            mElement: kAudioObjectPropertyElementMain
+        )
+    }
+}
diff --git a/Wave/Views/HomeView.swift b/Wave/Views/HomeView.swift
index a7d556c..df95f10 100644
--- a/Wave/Views/HomeView.swift
+++ b/Wave/Views/HomeView.swift
@@ -41,6 +41,7 @@ struct HomeView: View {
                 Text("Transcription")
                     .font(.headline)
                 Toggle("Include punctuation", isOn: $state.includePunctuation)
+                Toggle("Mute system audio while dictating", isOn: $state.muteSystemAudio)
             }
 
             // Model section