8 Commits

48 changed files with 1038 additions and 1539 deletions

View File

@ -17,17 +17,17 @@
"endTime": 30,
"filename": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\audio-service\\recordings\\audio_capture_20260220_193822.wav",
"name": "Pee pee\npoo poo",
"playbackType": "playStop",
"startTime": 27.756510985786615,
"volume": 1
"playbackType": "playOverlap",
"startTime": 27.76674010920584,
"volume": 0.25
},
{
"endTime": 28.597210828548004,
"endTime": 27.516843118383072,
"filename": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\audio-service\\recordings\\audio_capture_20260220_200442.wav",
"name": "Clip 20260220_200442",
"playbackType": "playStop",
"startTime": 26.1853978671042,
"volume": 1
"playbackType": "playOverlap",
"startTime": 25.120307988450435,
"volume": 0.64
}
]
}

View File

@ -1,10 +1,17 @@
{
"input_device": {
"default_samplerate": 44100.0,
"index": 1,
"max_input_channels": 8,
"name": "VM Mic mix (VB-Audio Voicemeete"
"channels": 2,
"default_samplerate": 48000,
"index": 55,
"name": "VM Mic mix (VB-Audio Voicemeeter VAIO)"
},
"save_path": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\audio-service\\recordings",
"recording_length": 30
"recording_length": 30,
"output_device": {
"channels": 2,
"default_samplerate": 48000,
"index": 45,
"name": "VM to Discord (VB-Audio Voicemeeter VAIO)"
},
"http_port": 5010
}

Binary file not shown.

View File

@ -0,0 +1,64 @@
import scipy.signal
import scipy.io.wavfile as wavfile
import numpy as np
import os
class AudioClip:
    """In-memory, pre-processed audio clip for mixer playback.

    Loads a WAV file, peak-normalizes it, downmixes to mono, resamples to
    the playback rate, and caches only the [startTime, endTime] region with
    the clip volume pre-applied. ``get_samples`` then streams fixed-size
    chunks for an audio callback.
    """

    def __init__(self, metadata, target_sample_rate=44100):
        """
        metadata: dict with keys 'filename', 'startTime', 'endTime' (seconds),
            and optionally 'volume' (linear gain, default 1.0).
        target_sample_rate: sample rate for playback
        """
        self.metadata = metadata
        self.file_path = metadata['filename']
        self.start = metadata.get('startTime', 0)
        self.end = metadata.get('endTime', None)  # None => play to end of file
        self.target_sample_rate = target_sample_rate
        self.volume = metadata.get('volume', 1.0)
        self.finished = False
        self.audio_data, self.sample_rate = self._load_and_process_audio()
        print(f"AudioClip created for {self.file_path} with start={self.start}s, end={self.end}s, sample_rate={self.sample_rate}Hz, length={len(self.audio_data)/self.sample_rate:.2f}s")
        self.position = 0  # sample index for playback

    def _load_and_process_audio(self):
        """Load, normalize, downmix, resample and trim the audio.

        Returns (samples, sample_rate) where samples is a mono float32 array
        covering only the clip region, already scaled by ``self.volume``.
        """
        sample_rate, data = wavfile.read(self.file_path)
        # Convert to float32 BEFORE taking abs/max: np.abs on int16 wraps
        # for -32768, which would corrupt the normalization factor.
        if data.dtype != np.float32:
            data = data.astype(np.float32)
            peak = np.max(np.abs(data)) if data.size else 0.0
            # Guard against an all-silent file (division by zero -> NaNs).
            if peak > 0:
                data /= peak
        # Downmix to mono by averaging channels.
        if data.ndim > 1:
            data = np.mean(data, axis=1)
        # Resample to the playback rate if needed.
        if sample_rate != self.target_sample_rate:
            num_samples = int(len(data) * self.target_sample_rate / sample_rate)
            data = scipy.signal.resample(data, num_samples)
            sample_rate = self.target_sample_rate
        # Cache only the clip region. Compare against None so an explicit
        # endTime of 0 is honored instead of silently meaning "whole file".
        start_sample = int(self.start * sample_rate)
        end_sample = int(self.end * sample_rate) if self.end is not None else len(data)
        # Copy so the in-place volume scale cannot touch a shared view.
        cached = np.array(data[start_sample:end_sample], dtype=np.float32, copy=True)
        cached *= self.volume  # Apply volume
        return cached, sample_rate

    def get_samples(self, num_samples):
        """Return the next ``num_samples`` samples, zero-padded at the end.

        Marks the clip finished once the cached region is exhausted.
        """
        if self.position >= len(self.audio_data):
            self.finished = True
            return np.zeros(num_samples, dtype=np.float32)
        end_pos = min(self.position + num_samples, len(self.audio_data))
        chunk = self.audio_data[self.position:end_pos]
        self.position = end_pos
        if self.position >= len(self.audio_data):
            self.finished = True
        # Pad if chunk is short so callers always get a full buffer.
        if len(chunk) < num_samples:
            chunk = np.pad(chunk, (0, num_samples - len(chunk)), mode='constant')
        return chunk

    def is_finished(self):
        """True once playback has consumed all cached samples."""
        return self.finished

    def reset(self):
        """Rewind the clip so it can be played again."""
        self.position = 0
        self.finished = False

View File

@ -0,0 +1,166 @@
import sounddevice as sd
import numpy as np
import os
from datetime import datetime
import scipy.io.wavfile as wavfile
from metadata_manager import MetaDataManager
from audio_clip import AudioClip
# AudioClip class for clip playback
class AudioIO:
    """Singleton owning the sounddevice input stream (circular recording
    buffer) and output stream (clip mixing playback)."""

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Process-wide singleton: every caller shares the same streams/buffer.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.init()
        return cls._instance

    def init(self):
        """One-time setup (invoked from __new__, not __init__)."""
        self.duration = 30  # circular buffer length, seconds
        self.channels = 2
        self.input_sample_rate = 44100
        self.output_sample_rate = 44100
        self.buffer = np.zeros((int(self.duration * self.input_sample_rate), self.channels), dtype=np.float32)
        self.recordings_dir = "recordings"
        sd.default.latency = 'low'
        self.in_stream = sd.InputStream(
            callback=self.record_callback
        )
        # NOTE(review): latency=3 (seconds) here disagrees with
        # refresh_streams(), which rebuilds the output stream without it --
        # confirm which latency is intended.
        self.out_stream = sd.OutputStream(
            callback=self.playback_callback,
            latency=3
        )
        # filename -> list of currently playing AudioClip instances
        self.clip_map = {}

    def refresh_streams(self):
        """Rebuild both streams (e.g. after a device change), preserving the
        running/stopped state and clearing the recording buffer."""
        was_active = self.in_stream.active
        if was_active:
            self.in_stream.stop()
            self.out_stream.stop()
        self.buffer = np.zeros((int(self.duration * self.input_sample_rate), self.channels), dtype=np.float32)
        self.in_stream = sd.InputStream(
            callback=self.record_callback
        )
        self.out_stream = sd.OutputStream(
            callback=self.playback_callback
        )
        if was_active:
            self.in_stream.start()
            self.out_stream.start()

    def record_callback(self, indata, frames, time, status):
        """Input stream callback: keep the last ``duration`` seconds."""
        if status:
            # print(f"Recording status: {status}")
            pass
        # Circular buffer implementation (roll copies the whole buffer each
        # callback; acceptable at these sizes).
        self.buffer = np.roll(self.buffer, -frames, axis=0)
        self.buffer[-frames:] = indata

    def playback_callback(self, outdata, frames, time, status):
        """Output stream callback: mix all active clips into ``outdata``."""
        if status:
            # print(f"Playback status: {status}")
            pass
        outdata.fill(0)
        # Iterate over a copy of the items to avoid modifying the dictionary
        # during iteration.
        for clip_id, clip_list in list(self.clip_map.items()):
            for clip in clip_list[:]:  # Iterate over a copy of the list
                if not clip.is_finished():
                    samples = clip.get_samples(frames)
                    # Mono clip broadcast into every output channel.
                    outdata[:] += samples.reshape(-1, 1)
                if clip.is_finished():
                    self.clip_map[clip_id].remove(clip)
                    if len(self.clip_map[clip_id]) == 0:
                        del self.clip_map[clip_id]
                        break  # Exit inner loop since the key is deleted

    def save_last_n_seconds(self):
        """Write the circular buffer to a timestamped WAV, register it in the
        metadata collection, and return the clip metadata dict."""
        # Create output directory if it doesn't exist
        os.makedirs(self.recordings_dir, exist_ok=True)
        # Generate filename with timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = os.path.join(self.recordings_dir, f"audio_capture_{timestamp}.wav")
        # Peak-normalize to -6 dBFS; guard against an all-silent buffer,
        # which would otherwise divide by zero and write NaNs to disk.
        peak = np.max(np.abs(self.buffer))
        if peak > 0:
            audio_data = self.buffer / peak * .5
        else:
            audio_data = self.buffer
        # Convert float32 to int16 for WAV file
        audio_data_int16 = (audio_data * 32767).astype(np.int16)
        wavfile.write(filename, int(self.input_sample_rate), audio_data_int16)
        meta = MetaDataManager()
        clip_metadata = {
            "filename": filename,
            "name": f"Clip {timestamp}",
            "playbackType": "playStop",
            "volume": 1.0,
        }
        meta.add_clip_to_collection("Uncategorized", clip_metadata)
        return clip_metadata

    def set_buffer_duration(self, duration):
        """Resize (and clear) the circular buffer to ``duration`` seconds."""
        self.duration = duration
        self.buffer = np.zeros((int(duration * self.input_sample_rate), self.channels), dtype=np.float32)

    def set_recording_directory(self, directory):
        """Set the directory where recordings will be saved."""
        self.recordings_dir = directory

    def start_recording(self):
        """Start both streams and capture their negotiated sample rates."""
        if self.in_stream.active:
            return
        self.in_stream.start()
        self.out_stream.start()
        self.output_sample_rate = self.out_stream.samplerate
        self.input_sample_rate = self.in_stream.samplerate

    def stop_recording(self):
        """Stop both streams (no-op if already stopped)."""
        if not self.in_stream.active:
            return
        self.in_stream.stop()
        self.out_stream.stop()

    def is_recording(self):
        """True while the input stream is active."""
        return self.in_stream.active

    def play_clip(self, clip_metadata):
        """Start (or, for playStop clips, toggle off) playback of a clip.

        playStop: a second request while the clip plays stops it.
        playOverlap: each request layers another instance of the clip.
        """
        print(f"Playing clip: {clip_metadata}")
        clip_id = clip_metadata.get("filename")
        if clip_metadata.get("playbackType") == "playStop":
            if clip_id in self.clip_map:
                # Toggle off: the clip is already playing.
                del self.clip_map[clip_id]
                return
            else:
                self.clip_map[clip_id] = []
        if clip_id not in self.clip_map:
            self.clip_map[clip_id] = []
        self.clip_map[clip_id].append(AudioClip(clip_metadata, target_sample_rate=self.output_sample_rate))

View File

@ -1,156 +0,0 @@
import sounddevice as sd
import numpy as np
import os
from datetime import datetime
import scipy.io.wavfile as wavfile
from metadata_manager import MetaDataManager
class AudioRecorder:
    """Singleton continuous audio recorder backed by a circular buffer."""

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Process-wide singleton: every caller shares one stream and buffer.
        if cls._instance is None:
            print("Creating new AudioRecorder instance")
            cls._instance = super().__new__(cls)
            cls._instance.init()
        return cls._instance

    def init(self):
        """
        Initialize audio recorder with configurable parameters.
        :param duration: Length of audio buffer in seconds
        :param sample_rate: Audio sample rate (if None, use default device sample rate)
        :param channels: Number of audio channels
        """
        print(f"Initializing AudioRecorder")
        self.duration = 30
        self.sample_rate = 44100
        self.channels = 2
        self.buffer = np.zeros((int(self.duration * self.sample_rate), self.channels), dtype=np.float32)
        self.recordings_dir = "recordings"
        self.stream = sd.InputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            callback=self.record_callback
        )

    def refresh_stream(self):
        """
        Refresh the audio stream with updated parameters.
        """
        was_active = self.stream.active
        if was_active:
            self.stream.stop()
        self.buffer = np.zeros((int(self.duration * self.sample_rate), self.channels), dtype=np.float32)
        self.stream = sd.InputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            callback=self.record_callback
        )
        if was_active:
            self.stream.start()

    def record_callback(self, indata, frames, time, status):
        """
        Circular buffer callback for continuous recording.
        :param indata: Input audio data
        :param frames: Number of frames
        :param time: Timestamp
        :param status: Recording status
        """
        if status:
            print(f"Recording status: {status}")
        # Circular buffer implementation
        self.buffer = np.roll(self.buffer, -frames, axis=0)
        self.buffer[-frames:] = indata

    def save_last_n_seconds(self):
        """
        Save the last n seconds of audio to a file.
        :return: Clip metadata dict for the saved audio file
        """
        # Create output directory if it doesn't exist
        os.makedirs(self.recordings_dir, exist_ok=True)
        # Generate filename with timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = os.path.join(self.recordings_dir, f"audio_capture_{timestamp}.wav")
        # Peak-normalize to -6 dBFS; guard against an all-silent buffer,
        # which would otherwise divide by zero and write NaNs to disk.
        peak = np.max(np.abs(self.buffer))
        if peak > 0:
            audio_data = self.buffer / peak * .5
        else:
            audio_data = self.buffer
        # Convert float32 to int16 for WAV file
        audio_data_int16 = (audio_data * 32767).astype(np.int16)
        # Write buffer to file
        wavfile.write(filename, int(self.sample_rate), audio_data_int16)
        meta = MetaDataManager()
        clip_metadata = {
            "filename": filename,
            "name": f"Clip {timestamp}",
            "playbackType": "playStop",
            "volume": 1.0,
        }
        meta.add_clip_to_collection("Uncategorized", clip_metadata)
        return clip_metadata

    def set_buffer_duration(self, duration):
        """
        Set the duration of the audio buffer.
        :param duration: New buffer duration in seconds
        """
        self.duration = duration
        self.buffer = np.zeros((int(duration * self.sample_rate), self.channels), dtype=np.float32)

    def set_recording_directory(self, directory):
        """
        Set the directory where recordings will be saved.
        :param directory: Path to the recordings directory
        """
        self.recordings_dir = directory

    def start_recording(self):
        """
        Start continuous audio recording with circular buffer.
        """
        if self.stream.active:
            print("Already recording")
            return
        print('number of channels', self.channels)
        self.stream.start()

    def stop_recording(self):
        """
        Stop continuous audio recording with circular buffer.
        """
        if not self.stream.active:
            print("Already stopped")
            return
        self.stream.stop()

    def is_recording(self):
        """
        Check if the audio stream is currently active.
        :return: True if recording, False otherwise
        """
        return self.stream.active

View File

@ -1,7 +1,7 @@
import argparse
import os
import sys
from audio_recorder import AudioRecorder
from audio_io import AudioIO
from windows_audio import WindowsAudioManager
import sounddevice as sd
from metadata_manager import MetaDataManager
@ -41,28 +41,27 @@ def main():
os.makedirs(settings.get_settings('save_path'), exist_ok=True)
io = AudioIO()
io.start_recording()
# Register blueprints
app.register_blueprint(recording_bp)
app.register_blueprint(device_bp)
app.register_blueprint(metadata_bp)
app.register_blueprint(settings_bp)
app.run(host='127.0.0.1', port=args.osc_port, debug=False, use_reloader=True)
app.run(host='127.0.0.1', port=settings.get_settings('http_port'), debug=False, use_reloader=True)
# socketio.run(app, host='127.0.0.1', port=args.osc_port, debug=False, use_reloader=True)
# Run the OSC server
try:
print(f"Starting OSC Recording Server on port {args.osc_port}")
# osc_server.run_server()
except KeyboardInterrupt:
print("\nServer stopped by user.")
except Exception as e:
print(f"Error starting server: {e}")
sys.exit(1)
# try:
# print(f"Starting OSC Recording Server on port {args.osc_port}")
# # osc_server.run_server()
# except KeyboardInterrupt:
# print("\nServer stopped by user.")
# except Exception as e:
# print(f"Error starting server: {e}")
# sys.exit(1)
if __name__ == "__main__":

View File

@ -1,11 +1,11 @@
from flask import Blueprint, request, jsonify
from windows_audio import WindowsAudioManager
from audio_recorder import AudioRecorder
from audio_io import AudioIO
device_bp = Blueprint('device', __name__)
audio_manager = WindowsAudioManager()
recorder = AudioRecorder()
recorder = AudioIO()
# @device_bp.route('/device/set', methods=['POST'])
# def set_audio_device():
@ -19,13 +19,13 @@ recorder = AudioRecorder()
# except Exception as e:
# return jsonify({'status': 'error', 'message': str(e)}), 400
@device_bp.route('/device/get', methods=['GET'])
def get_audio_device():
try:
device_info = audio_manager.get_default_device('input')
return jsonify({'status': 'success', 'device_info': device_info})
except Exception as e:
return jsonify({'status': 'error', 'message': str(e)}), 400
# @device_bp.route('/device/get', methods=['GET'])
# def get_audio_device():
# try:
# device_info = audio_manager.get_default_device('input')
# return jsonify({'status': 'success', 'device_info': device_info})
# except Exception as e:
# return jsonify({'status': 'error', 'message': str(e)}), 400
@device_bp.route('/device/list', methods=['GET'])
def list_audio_devices():

View File

@ -91,7 +91,7 @@ def edit_clip_in_collection():
meta_manager = MetaDataManager()
collection_name = request.json.get('name')
clip_metadata = request.json.get('clip')
print(f"Received request to edit clip in collection '{collection_name}': {clip_metadata}")
# print(f"Received request to edit clip in collection '{collection_name}': {clip_metadata}")
try:
meta_manager.edit_clip_in_collection(collection_name, clip_metadata)
collections = meta_manager.collections

View File

@ -1,36 +1,36 @@
from flask import Blueprint, request, jsonify
from audio_recorder import AudioRecorder
from audio_io import AudioIO
import os
recording_bp = Blueprint('recording', __name__)
@recording_bp.route('/record/start', methods=['POST'])
def start_recording():
recorder = AudioRecorder()
recorder = AudioIO()
print('HTTP: Starting audio recording')
recorder.start_recording()
return jsonify({'status': 'recording started'})
@recording_bp.route('/record/stop', methods=['POST'])
def stop_recording():
recorder = AudioRecorder()
print('HTTP: Stopping audio recording')
recorder = AudioIO()
# print('HTTP: Stopping audio recording')
recorder.stop_recording()
return jsonify({'status': 'recording stopped'})
@recording_bp.route('/record/save', methods=['POST'])
def save_recording():
recorder = AudioRecorder()
print('HTTP: Saving audio recording')
recorder = AudioIO()
# print('HTTP: Saving audio recording')
saved_file = recorder.save_last_n_seconds()
return jsonify({'status': 'recording saved', 'file': saved_file})
@recording_bp.route('/record/status', methods=['GET'])
def recording_status():
recorder = AudioRecorder()
print('HTTP: Checking recording status')
recorder = AudioIO()
# print('HTTP: Checking recording status')
status = 'recording' if recorder.is_recording() else 'stopped'
return jsonify({'status': status})
@ -45,8 +45,12 @@ def recording_delete():
@recording_bp.route('/playback/start', methods=['POST'])
def playback_start():
print('HTTP: Starting audio playback')
print(f"Playing clip")
# print('HTTP: Starting audio playback')
clip = request.json
try:
io = AudioIO()
io.play_clip(clip)
# os.remove(filename)
return jsonify({'status': 'success'})
except Exception as e:

View File

@ -16,10 +16,16 @@ def get_setting(name):
else:
return jsonify({'status': 'error', 'message': f'Setting "{name}" not found'}), 404
@settings_bp.route('/settings/<name>', methods=['POST'])
def set_setting(name):
value = request.json.get('value')
if value is None:
return jsonify({'status': 'error', 'message': 'Value is required'}), 400
SettingsManager().set_settings(name, value)
return jsonify({'status': 'success', 'name': name, 'value': value})
@settings_bp.route('/settings/update', methods=['POST'])
def set_all_settings():
settings = request.json.get('settings')
print (f"Received settings update: {settings}")
if settings is None:
return jsonify({'status': 'error', 'message': 'Settings are required'}), 400
try:
for name, value in settings.items():
print(f"Updating setting '{name}' to '{value}'")
SettingsManager().set_settings(name, value)
return jsonify({'status': 'success', 'settings': settings})
except ValueError as e:
return jsonify({'status': 'error', 'message': str(e)}), 400

View File

@ -1,6 +1,6 @@
import os
import json
from audio_recorder import AudioRecorder
from audio_io import AudioIO
from windows_audio import WindowsAudioManager
class SettingsManager:
@ -13,6 +13,7 @@ class SettingsManager:
return cls._instance
def init(self):
# read settings file from executing directory
print("Initializing SettingsManager", os.getcwd())
self.settings_file = os.path.join(os.getcwd(), "settings.json")
if os.path.exists(self.settings_file):
with open(self.settings_file, "r") as f:
@ -20,21 +21,49 @@ class SettingsManager:
else:
self.settings = {
"input_device": None,
"output_device": None,
"save_path": os.path.join(os.getcwd(), "recordings"),
"recording_length": 15
}
audio_manager = WindowsAudioManager()
devices = audio_manager.list_audio_devices('input')
print(f"Available input devices: {self.settings}")
input = self.settings["input_device"]
input_devices = audio_manager.list_audio_devices('input')
output_devices = audio_manager.list_audio_devices('output')
# print("Available input devices:")
# for i, dev in enumerate(input_devices):
# print(i, dev['name'])
# print("Available output devices:")
# for i, dev in enumerate(output_devices):
# print(i, dev['name'])
# print(f"Available input devices: {input_devices}")
# print(f"Available output devices: {output_devices}")
input = None
output = None
if("input_device" in self.settings):
input = self.settings["input_device"]
if("output_device" in self.settings):
output = self.settings["output_device"]
#see if input device is in "devices", if not set to the first index
if input is not None and any(d['name'] == input["name"] for d in devices):
print(f"Using saved input device index: {input}")
if input is not None and any(d['name'] == input["name"] for d in input_devices):
# print(f"Using saved input device index: {input}")
pass
else:
input = devices[0] if devices else None
input = input_devices[0] if input_devices else None
self.settings["input_device"] = input
#see if output device is in "devices", if not set to the first index
if output is not None and any(d['name'] == output["name"] for d in output_devices):
# print(f"Using saved output device index: {output}")
pass
else:
output = output_devices[0] if output_devices else None
self.settings["output_device"] = output
if not "http_port" in self.settings:
self.settings["http_port"] = 5010
self.save_settings()
@ -48,6 +77,8 @@ class SettingsManager:
return self.settings
def set_settings(self, name, value):
if(name not in self.settings):
raise ValueError(f"Setting '{name}' not found.")
self.settings[name] = value
self.save_settings()
@ -57,13 +88,14 @@ class SettingsManager:
json.dump(self.settings, f, indent=4)
def refresh_settings(self):
recorder = AudioRecorder()
recorder = AudioIO()
# Update recorder parameters based on new setting
recorder.set_buffer_duration(self.get_settings('recording_length'))
recorder.recordings_dir = self.get_settings('save_path')
audio_manager = WindowsAudioManager()
audio_manager.set_default_input_device(self.get_settings('input_device')['index'])
audio_manager.set_default_output_device(self.get_settings('output_device')['index'])
recorder.refresh_stream()
recorder.refresh_streams()

View File

@ -18,7 +18,19 @@ class WindowsAudioManager:
"""
Initialize Windows audio device and volume management.
"""
self.devices = sd.query_devices()
host_apis = sd.query_hostapis()
wasapi_device_indexes = None
for api in host_apis:
if api['name'].lower() == 'Windows WASAPI'.lower():
wasapi_device_indexes = api['devices']
break
# print(f"Host APIs: {host_apis}")
# print(f"WASAPI Device Indexes: {wasapi_device_indexes}")
wasapi_device_indexes = set(wasapi_device_indexes) if wasapi_device_indexes is not None else set()
self.devices = [dev for dev in sd.query_devices() if dev['index'] in wasapi_device_indexes]
# self.devices = sd.query_devices()
# print(f"devices: {self.devices}")
self.default_input = sd.default.device[0]
self.default_output = sd.default.device[1]
@ -34,7 +46,7 @@ class WindowsAudioManager:
{
'index': dev['index'],
'name': dev['name'],
'max_input_channels': dev['max_input_channels'],
'channels': dev['max_input_channels'],
'default_samplerate': dev['default_samplerate']
}
for dev in self.devices if dev['max_input_channels'] > 0
@ -44,7 +56,7 @@ class WindowsAudioManager:
{
'index': dev['index'],
'name': dev['name'],
'max_output_channels': dev['max_output_channels'],
'channels': dev['max_output_channels'],
'default_samplerate': dev['default_samplerate']
}
for dev in self.devices if dev['max_output_channels'] > 0
@ -69,7 +81,7 @@ class WindowsAudioManager:
def set_default_input_device(self, device_index):
if(device_index is None):
return self.get_current_input_device_sample_rate()
return 0
"""
Set the default input audio device.
@ -83,41 +95,18 @@ class WindowsAudioManager:
device_info = sd.query_devices(device_index)
return device_info['default_samplerate']
def get_current_input_device_sample_rate(self):
def set_default_output_device(self, device_index):
if(device_index is None):
return self.get_current_output_device_sample_rate()
"""
Get the sample rate of the current input device.
Set the default output audio device.
:return: Sample rate of the current input device
:param device_index: Index of the audio device
:return: Sample rate of the selected device
"""
device_info = sd.query_devices(self.default_input)
sd.default.device[1] = device_index
self.default_output = device_index
# Get the sample rate of the selected device
device_info = sd.query_devices(device_index)
return device_info['default_samplerate']
def get_system_volume(self):
"""
Get the system master volume.
:return: Current system volume (0.0 to 1.0)
"""
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_,
CLSCTX_ALL,
None
)
volume = interface.QueryInterface(IAudioEndpointVolume)
return volume.GetMasterVolumeLevelScalar()
def set_system_volume(self, volume_level):
"""
Set the system master volume.
:param volume_level: Volume level (0.0 to 1.0)
"""
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_,
CLSCTX_ALL,
None
)
volume = interface.QueryInterface(IAudioEndpointVolume)
volume.SetMasterVolumeLevelScalar(volume_level, None)

16
electron-ui/settings.json Normal file
View File

@ -0,0 +1,16 @@
{
"input_device": {
"index": 49,
"name": "Microphone (Logi C615 HD WebCam)",
"channels": 1,
"default_samplerate": 48000.0
},
"output_device": {
"index": 40,
"name": "Speakers (Realtek(R) Audio)",
"channels": 2,
"default_samplerate": 48000.0
},
"save_path": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\electron-ui\\recordings",
"recording_length": 15
}

View File

@ -1,5 +1,7 @@
// IPC channel identifiers for the audio-service bridge between the
// Electron main process and the renderer.
const AudioChannels = {
  LOAD_AUDIO_BUFFER: 'audio:loadAudioBuffer',
  GET_PORT: 'audio:getPort',
  RESTART_SERVICE: 'audio:restartService',
} as const;
export default AudioChannels;

View File

@ -2,6 +2,7 @@ import { ipcMain } from 'electron';
import fs from 'fs';
import AudioChannels from './channels';
import { LoadAudioBufferArgs, LoadAudioBufferResult } from './types';
import PythonSubprocessManager from '../../main/service';
export default function registerAudioIpcHandlers() {
ipcMain.handle(
@ -15,4 +16,25 @@ export default function registerAudioIpcHandlers() {
}
},
);
ipcMain.handle(AudioChannels.GET_PORT, async () => {
try {
if (PythonSubprocessManager.instance?.portNumber) {
return { port: PythonSubprocessManager.instance.portNumber };
}
return { error: 'Port number not available yet.' };
} catch (err: any) {
return { error: err.message };
}
});
ipcMain.handle(AudioChannels.RESTART_SERVICE, async () => {
try {
PythonSubprocessManager.instance?.restart();
return { success: true };
} catch (err: any) {
return { success: false, error: err.message };
}
});
}

View File

@ -6,3 +6,22 @@ export interface LoadAudioBufferResult {
buffer?: Buffer;
error?: string;
}
// Result of the audio:getPort IPC call; on success `port` is set,
// otherwise `error` describes the failure.
export interface GetPortResult {
  port?: number;
  error?: string;
}
// NOTE(review): SetPortArgs/SetPortResult have no matching IPC channel in
// this change set -- presumably reserved for a future setter; confirm.
export interface SetPortArgs {
  port: number;
}
export interface SetPortResult {
  success: boolean;
  error?: string;
}
// Result of the audio:restartService IPC call.
export interface RestartServiceResult {
  success: boolean;
  error?: string;
}

View File

@ -16,6 +16,7 @@ import log from 'electron-log';
import MenuBuilder from './menu';
import { resolveHtmlPath } from './util';
import registerFileIpcHandlers from '../ipc/audio/main';
import PythonSubprocessManager from './service';
class AppUpdater {
constructor() {
@ -110,6 +111,10 @@ const createWindow = async () => {
});
registerFileIpcHandlers();
const pythonManager = new PythonSubprocessManager('src/main.py');
pythonManager.start();
// Remove this if your app does not use auto updates
// eslint-disable-next-line
new AppUpdater();

View File

@ -1,8 +1,7 @@
// Disable no-unused-vars, broken for spread args
/* eslint no-unused-vars: off */
import { contextBridge, ipcRenderer, IpcRendererEvent } from 'electron';
import FileChannels from '../ipc/audio/channels';
import { LoadAudioBufferArgs, ReadTextArgs } from '../ipc/audio/types';
import { LoadAudioBufferArgs } from '../ipc/audio/types';
import AudioChannels from '../ipc/audio/channels';
// import '../ipc/file/preload'; // Import file API preload to ensure it runs and exposes the API
@ -41,10 +40,8 @@ const audioHandler = {
filePath,
} satisfies LoadAudioBufferArgs),
readText: (filePath: string) =>
ipcRenderer.invoke(AudioChannels.READ_TEXT, {
filePath,
} satisfies ReadTextArgs),
getPort: () => ipcRenderer.invoke(AudioChannels.GET_PORT),
restartService: () => ipcRenderer.invoke(AudioChannels.RESTART_SERVICE),
};
contextBridge.exposeInMainWorld('audio', audioHandler);

View File

@ -0,0 +1,79 @@
import { spawn, ChildProcessWithoutNullStreams } from 'child_process';
import path from 'path';
/**
 * Manages the Python audio-service subprocess: spawn, stop, restart, and
 * detection of the Flask port from the subprocess's stderr output.
 */
export default class PythonSubprocessManager {
  // eslint-disable-next-line no-use-before-define
  public static instance: PythonSubprocessManager | null = null;

  private process: ChildProcessWithoutNullStreams | null = null;

  private scriptPath: string;

  private working_dir: string = path.join(
    __dirname,
    '..',
    '..',
    '..',
    'audio-service',
  );

  // Port the Python HTTP server reported; null until detected.
  public portNumber: number | null = null;

  constructor(scriptPath: string) {
    this.scriptPath = scriptPath;
    PythonSubprocessManager.instance = this;
  }

  start(args: string[] = []): void {
    if (this.process) {
      throw new Error('Process already running.');
    }
    console.log(`Using Python working directory at: ${this.working_dir}`);
    console.log(`Starting Python subprocess with script: ${this.scriptPath}`);
    // Clear any port detected from a previous run so callers never read a
    // stale value while the new process is still starting up.
    this.portNumber = null;
    const proc = spawn('venv/Scripts/python.exe', [this.scriptPath, ...args], {
      cwd: this.working_dir,
      detached: false,
      stdio: 'pipe',
    });
    this.process = proc;
    proc.stdout.on('data', (data: Buffer) => {
      console.log(`Python stdout: ${data.toString()}`);
    });
    proc.stderr.on('data', (data: Buffer) => {
      // Flask logs "Running on http://...:PORT" to stderr; scrape the port.
      const lines = data.toString().split('\n');
      // eslint-disable-next-line no-restricted-syntax
      for (const line of lines) {
        const match = line.match(/Running on .*:(\d+)/);
        if (match) {
          const port = parseInt(match[1], 10);
          console.log(`Detected port: ${port}`);
          this.portNumber = port;
        }
      }
    });
    proc.on('exit', () => {
      console.log('Python subprocess exited.');
      // Only clear the handle if this is still the current process. After a
      // restart(), the OLD process's asynchronous exit event must not
      // clobber the handle of the newly spawned process.
      if (this.process === proc) {
        this.process = null;
      }
    });
  }

  stop(): void {
    if (this.process) {
      this.process.kill();
      this.process = null;
    }
  }

  restart(args: string[] = []): void {
    this.stop();
    this.start(args);
  }

  isHealthy(): boolean {
    return !!this.process && !this.process.killed;
  }
}

View File

@ -2,6 +2,8 @@ import { MemoryRouter as Router, Routes, Route } from 'react-router-dom';
import { useEffect, useState } from 'react';
import { Provider } from 'react-redux';
import Dialog from '@mui/material/Dialog';
import { ThemeProvider, createTheme } from '@mui/material/styles';
import DialogTitle from '@mui/material/DialogTitle';
import DialogContent from '@mui/material/DialogContent';
import DialogActions from '@mui/material/DialogActions';
@ -10,6 +12,9 @@ import './App.css';
import ClipList from './components/ClipList';
import { useAppDispatch, useAppSelector } from './hooks';
import { store } from '../redux/main';
import { useNavigate } from 'react-router-dom';
import SettingsPage from './Settings';
import apiFetch from './api';
function MainPage() {
const dispatch = useAppDispatch();
@ -21,11 +26,12 @@ function MainPage() {
);
const [newCollectionOpen, setNewCollectionOpen] = useState(false);
const [newCollectionName, setNewCollectionName] = useState<string>('');
const navigate = useNavigate();
useEffect(() => {
const fetchMetadata = async () => {
try {
const response = await fetch('http://localhost:5010/meta');
const response = await apiFetch('meta');
const data = await response.json();
dispatch({ type: 'metadata/setAllData', payload: data });
} catch (error) {
@ -137,6 +143,22 @@ function MainPage() {
</li>
))}
</ul>
{/* Settings Button at Bottom Left */}
<div className="mt-auto mb-2">
<button
type="button"
className="w-full rounded px-4 py-2 bg-neutral-800 text-offwhite hover:bg-plumDark text-left"
style={{
position: 'absolute',
bottom: 16,
left: 8,
width: 'calc(100% - 16px)',
}}
onClick={() => navigate('/settings')}
>
Settings
</button>
</div>
</nav>
{/* Main Content */}
<div
@ -150,13 +172,39 @@ function MainPage() {
}
export default function App() {
const theme = createTheme({
colorSchemes: {
light: false,
dark: {
palette: {
primary: {
main: '#6e44ba', // plum
dark: '#6e44ba', // plum
contrastText: '#ffffff',
},
secondary: {
main: '#4f3186', // plumDark
dark: '#4f3186', // plumDark
contrastText: '#ffffff',
},
},
},
},
// colorSchemes: {
// light: false,
// dark: true,
// },
});
return (
<Provider store={store}>
<Router>
<Routes>
<Route path="/" element={<MainPage />} />
</Routes>
</Router>
<ThemeProvider theme={theme}>
<Router>
<Routes>
<Route path="/" element={<MainPage />} />
<Route path="/settings" element={<SettingsPage />} />
</Routes>
</Router>
</ThemeProvider>
</Provider>
);
}

View File

@ -0,0 +1,274 @@
import React, { useEffect, useState } from 'react';
import { useNavigate } from 'react-router-dom';
import './App.css';
import TextField from '@mui/material/TextField';
import Select from '@mui/material/Select';
import MenuItem from '@mui/material/MenuItem';
import apiFetch from './api';
type AudioDevice = {
index: number;
name: string;
default_sample_rate: number;
channels: number;
};
type Settings = {
http_port: number;
input_device: AudioDevice;
output_device: AudioDevice;
recording_length: number;
save_path: string;
};
const defaultSettings: Settings = {
http_port: 0,
input_device: { index: 0, name: '', default_sample_rate: 0, channels: 0 },
output_device: { index: 0, name: '', default_sample_rate: 0, channels: 0 },
recording_length: 0,
save_path: '',
};
/**
 * Fetch the available audio devices of the given type from the backend.
 * Resolves to an empty list (after logging the error) if the request fails.
 */
async function fetchAudioDevices(
  type: 'input' | 'output',
): Promise<AudioDevice[]> {
  try {
    const res = await apiFetch(`device/list?device_type=${type}`);
    const data = await res.json();
    return data.devices as AudioDevice[];
  } catch (error) {
    console.error('Error fetching audio devices:', error);
    return [];
  }
}
/**
 * Load the persisted settings from the backend.
 * Falls back to `defaultSettings` (after logging the error) on failure.
 */
async function fetchSettings(): Promise<Settings> {
  console.log('Fetching settings from backend...');
  try {
    const res = await apiFetch('settings');
    const data = await res.json();
    return data.settings as Settings;
  } catch (error) {
    console.error('Error fetching settings:', error);
    return defaultSettings;
  }
}
/**
 * Persist the given settings to the backend and, on success, restart the
 * audio service so the new device/port configuration takes effect.
 * Errors are logged and swallowed (best-effort save).
 */
const sendSettingsToBackend = async (settings: Settings) => {
  try {
    const res = await apiFetch('settings/update', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ settings }),
    });
    const data = await res.json();
    console.log('Settings update response:', data);
    if (data.status === 'success') {
      // The service must restart for device/port changes to apply.
      window.audio.restartService();
      // Only claim success once the backend has confirmed it (the original
      // logged "Settings updated" before the request was even sent).
      console.log('Settings updated:', settings);
    }
  } catch (error) {
    console.error('Error updating settings:', error);
  }
};
/**
 * Settings page: configures the backend HTTP port, the input/output audio
 * devices, the recording buffer length, and shows the clip output folder.
 * Text fields save on blur; device selects save immediately on change.
 */
export default function SettingsPage() {
  const [settings, setSettings] = useState<Settings>(defaultSettings);
  const [inputDevices, setInputDevices] = useState<AudioDevice[]>([]);
  const [outputDevices, setOutputDevices] = useState<AudioDevice[]>([]);
  const navigate = useNavigate();

  // On mount: load the saved settings first, then both device lists.
  useEffect(() => {
    fetchSettings()
      .then((fetchedSettings) => {
        console.log('Fetched settings:', fetchedSettings);
        setSettings(fetchedSettings);
        return null;
      })
      .then(() => {
        return fetchAudioDevices('input');
      })
      .then((devices) => {
        setInputDevices(devices);
        return fetchAudioDevices('output');
      })
      .then((devices) => {
        setOutputDevices(devices);
        return devices;
      })
      .catch((error) => {
        console.error('Error fetching audio devices:', error);
      });
  }, []);

  // Push the current settings to the backend. Used on blur of text fields;
  // the device selects call sendSettingsToBackend directly with the merged
  // value so they do not race React's asynchronous state update.
  const handleChange = () => {
    sendSettingsToBackend(settings);
  };

  // TODO(review): wire up a native folder picker (e.g. an IPC call to the
  // main process) and write the chosen directory into `settings.save_path`.
  const handleFolderChange = async () => {};

  return (
    <div className="min-w-screen min-h-screen bg-midnight text-offwhite flex items-center justify-center relative">
      <div className="w-3/4 min-w-[600px] max-w-[800px] self-start flex flex-col font-sans bg-midnight text-offwhite p-6 rounded-lg relative">
        {/* X Close Button */}
        <button
          type="button"
          className="absolute top-6 right-6 text-3xl font-bold text-offwhite bg-transparent hover:text-plumDark"
          aria-label="Close settings"
          onClick={() => navigate('/')}
        >
          ×
        </button>
        <span className="text-2xl font-bold mb-4">Settings</span>
        <div className="mb-4 flex justify-between">
          <span>HTTP Port:</span>
          <TextField
            variant="standard"
            type="text"
            name="httpPort"
            value={settings.http_port}
            onBlur={() => handleChange()}
            onChange={(e) => {
              // Only accept numeric input; non-numbers are ignored.
              if (!Number.isNaN(Number(e.target.value))) {
                setSettings((prev) => ({
                  ...prev,
                  http_port: Number(e.target.value),
                }));
              }
            }}
            className="ml-2 text-white w-[150px]"
          />
        </div>
        <div className="mb-4 flex justify-between">
          <span>Input Audio Device:</span>
          <Select
            variant="standard"
            name="inputDevice"
            value={settings.input_device.index}
            onChange={(e) => {
              const newDevice = inputDevices.find(
                (dev) => dev.index === Number(e.target.value),
              );
              console.log('Selected input device index:', newDevice);
              if (newDevice) {
                setSettings((prev) => ({
                  ...prev,
                  input_device: newDevice,
                }));
                // Send the merged value directly — `settings` in scope is
                // still the pre-update snapshot.
                sendSettingsToBackend({
                  ...settings,
                  input_device: newDevice,
                });
              }
            }}
            className="ml-2 w-64"
          >
            {inputDevices.map((dev) => (
              <MenuItem key={dev.index} value={dev.index}>
                {dev.name}
              </MenuItem>
            ))}
          </Select>
        </div>
        <div className="mb-4 flex justify-between">
          <span>Output Audio Device:</span>
          <Select
            variant="standard"
            name="outputDevice"
            value={settings.output_device.index}
            onChange={(e) => {
              const newDevice = outputDevices.find(
                (dev) => dev.index === Number(e.target.value),
              );
              if (newDevice) {
                setSettings((prev) => ({
                  ...prev,
                  output_device: newDevice,
                }));
                sendSettingsToBackend({
                  ...settings,
                  output_device: newDevice,
                });
              }
            }}
            className="ml-2 w-64"
          >
            {outputDevices.map((dev) => (
              <MenuItem key={dev.index} value={dev.index}>
                {dev.name}
              </MenuItem>
            ))}
          </Select>
        </div>
        <div className="mb-4 flex justify-between">
          <span>Recording Length (seconds):</span>
          <TextField
            variant="standard"
            type="text"
            name="recordingLength"
            value={settings.recording_length}
            onChange={(e) => {
              if (!Number.isNaN(Number(e.target.value))) {
                setSettings((prev) => ({
                  ...prev,
                  recording_length: Number(e.target.value),
                }));
              }
            }}
            onBlur={() => handleChange()}
            className="ml-2 w-[150px]"
          />
        </div>
        <div className="mb-4 flex justify-between">
          <span>Clip Output Folder:</span>
          <div className="flex justify-end">
            {/* NOTE(review): controlled field with no onChange — typing has
                no effect; the "..." picker button is the intended edit path. */}
            <TextField
              variant="standard"
              type="text"
              name="savePath"
              value={settings.save_path}
              className="ml-2 w-[300px]"
            />
            <button
              type="button"
              onClick={handleFolderChange}
              className="ml-2 px-3 py-1 rounded bg-plumDark text-offwhite hover:bg-plum"
            >
              ...
            </button>
          </div>
        </div>
      </div>
    </div>
  );
}

View File

@ -0,0 +1,13 @@
// Resolve the backend base URL by asking the Electron main process which
// port the audio service is listening on. Falls back to the default port
// (5010) if the lookup reports an error, returns no port, or throws —
// previously a rejected getPort() promise would reject every apiFetch call.
const getBaseUrl = async () => {
  try {
    const port = await window.audio.getPort();
    if (port.error || !port.port) {
      return `http://localhost:5010`;
    }
    return `http://localhost:${port.port}`;
  } catch {
    // IPC failure — assume the service is on its default port.
    return `http://localhost:5010`;
  }
};
/**
 * Thin wrapper around `fetch` that prefixes the audio-service base URL
 * onto the given endpoint path.
 */
export default async function apiFetch(endpoint: string, options = {}) {
  const base = await getBaseUrl();
  return fetch(`${base}/${endpoint}`, options);
}

View File

@ -9,6 +9,9 @@ import Dialog from '@mui/material/Dialog';
import DialogTitle from '@mui/material/DialogTitle';
import DialogContent from '@mui/material/DialogContent';
import DialogActions from '@mui/material/DialogActions';
import Slider from '@mui/material/Slider';
import ToggleButton from '@mui/material/ToggleButton';
import ToggleButtonGroup from '@mui/material/ToggleButtonGroup';
import { useWavesurfer } from '@wavesurfer/react';
import RegionsPlugin from 'wavesurfer.js/dist/plugins/regions.esm.js';
import ZoomPlugin from 'wavesurfer.js/dist/plugins/zoom.esm.js';
@ -19,8 +22,10 @@ import ArrowDownwardIcon from '@mui/icons-material/ArrowDownward';
import DeleteIcon from '@mui/icons-material/Delete';
import { useSortable } from '@dnd-kit/sortable';
import { CSS } from '@dnd-kit/utilities';
import { ClipMetadata } from '../../redux/types';
import { ClipMetadata, PlaybackType } from '../../redux/types';
import { useAppSelector } from '../hooks';
import PlayStopIcon from './playStopIcon';
import PlayOverlapIcon from './playOverlapIcon';
export interface AudioTrimmerProps {
metadata: ClipMetadata;
@ -43,6 +48,7 @@ export default function AudioTrimmer({
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
const [dropdownOpen, setDropdownOpen] = useState(false);
const [nameInput, setNameInput] = useState<string>(metadata.name);
const [volumeInput, setVolumeInput] = useState<number>(metadata.volume ?? 1);
const collectionNames = useAppSelector((state) =>
state.collections.map((col) => col.name),
);
@ -223,6 +229,7 @@ export default function AudioTrimmer({
} else {
const allRegions = (plugins[0] as RegionsPlugin).getRegions();
if (allRegions.length > 0) {
wavesurfer.setVolume(metadata.volume ?? 1);
wavesurfer.play(allRegions[0].start, allRegions[0].end);
} else {
wavesurfer.play();
@ -401,12 +408,74 @@ export default function AudioTrimmer({
<div className="m-1 wavesurfer-scroll-container">
<div ref={containerRef} className="wavesurfer-inner" />
</div>
<div className="grid justify-items-stretch grid-cols-2 text-neutral-500">
<div className="m-1 flex justify-start">
<text className="text-sm ">
Clip: {formatTime(metadata.startTime ?? 0)} -{' '}
{formatTime(metadata.endTime ?? 0)}
</text>
<div className="flex justify-between mt-2">
<span className="w-1/5 flex-none text-sm text-neutral-500 self-center">
Clip: {formatTime(metadata.startTime ?? 0)} -{' '}
{formatTime(metadata.endTime ?? 0)}
</span>
<div className="w-3/5 flex-1 flex justify-center items-center">
<Slider
value={volumeInput}
min={0}
max={1}
step={0.01}
onChange={(e, newValue) => setVolumeInput(newValue as number)}
onChangeCommitted={(e, newValue) => {
const newVolume = newValue as number;
console.log('Volume change:', newVolume);
if (onSave) onSave({ ...metadata, volume: newVolume });
}}
color="secondary"
className="p-0 m-0"
/>
{/* <input
type="range"
min={0}
max={1}
step={0.01}
value={volumeInput}
onChange={(e) => {
const newVolume = parseFloat(e.target.value);
setVolumeInput(newVolume);
}}
onDragEnd={(e) => {
console.log('Volume change:');
// const newVolume = parseFloat(e.target.value);
// if (onSave) onSave({ ...metadata, volume: newVolume });
}}
className="mx-2 w-full accent-plum"
aria-label="Volume slider"
/> */}
</div>
<div className="w-1/5 flex justify-end text-sm text-neutral-500">
<ToggleButtonGroup value={metadata.playbackType}>
<ToggleButton
value="playStop"
color="primary"
onClick={() => {
if (onSave)
onSave({
...metadata,
playbackType: PlaybackType.PlayStop,
});
}}
>
<PlayStopIcon />
</ToggleButton>
<ToggleButton
value="playOverlap"
color="primary"
onClick={() => {
if (onSave)
onSave({
...metadata,
playbackType: PlaybackType.PlayOverlap,
});
}}
>
<PlayOverlapIcon />
</ToggleButton>
</ToggleButtonGroup>
</div>
</div>
</div>

View File

@ -15,6 +15,7 @@ import { restrictToVerticalAxis } from '@dnd-kit/modifiers';
import AudioTrimmer from './AudioTrimer';
import { ClipMetadata } from '../../redux/types';
import { useAppDispatch, useAppSelector } from '../hooks';
import apiFetch from '../api';
export interface ClipListProps {
collection: string;
@ -31,6 +32,33 @@ export default function ClipList({ collection }: ClipListProps) {
useSensor(PointerSensor, { activationConstraint: { distance: 5 } }),
);
const handleDrop = (event: React.DragEvent<HTMLDivElement>) => {
event.preventDefault();
console.log('Files dropped:', event.dataTransfer.files);
const files = Array.from(event.dataTransfer.files).filter((file) =>
file.type.startsWith('audio/'),
);
if (files.length > 0) {
const formData = new FormData();
files.forEach((file) => formData.append('files', file));
// todo send the file to the backend and add to the collection
// fetch('http://localhost:5010/file/upload', {
// method: 'POST',
// body: formData,
// })
// .then((res) => res.json())
// .catch((err) => console.error('Error uploading files:', err));
// Implement your onDrop logic here
// onDrop(files, selectedCollection);
}
};
const handleDragOver = (event: React.DragEvent<HTMLDivElement>) => {
event.preventDefault();
};
async function handleDragEnd(event: any) {
const { active, over } = event;
if (active.id !== over?.id) {
@ -50,19 +78,16 @@ export default function ClipList({ collection }: ClipListProps) {
payload: { collection, newMetadata },
});
try {
const response = await fetch(
'http://localhost:5010/meta/collection/clips/reorder',
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
name: collection,
clips: newMetadata.clips,
}),
const response = await apiFetch('meta/collection/clips/reorder', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
name: collection,
clips: newMetadata.clips,
}),
});
const data = await response.json();
console.log('handle reorder return:', data.collections);
dispatch({ type: 'metadata/setAllData', payload: data });
@ -78,7 +103,7 @@ export default function ClipList({ collection }: ClipListProps) {
type: 'metadata/deleteClip',
payload: { collection, clip: meta },
});
fetch('http://localhost:5010/meta/collection/clips/remove', {
apiFetch('meta/collection/clips/remove', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
@ -99,7 +124,7 @@ export default function ClipList({ collection }: ClipListProps) {
type: 'metadata/moveClip',
payload: { sourceCollection: collection, targetCollection, clip: meta },
});
fetch('http://localhost:5010/meta/collection/clips/move', {
apiFetch('meta/collection/clips/move', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
@ -120,19 +145,16 @@ export default function ClipList({ collection }: ClipListProps) {
type: 'metadata/editClip',
payload: { collection, clip: meta },
});
const response = await fetch(
'http://localhost:5010/meta/collection/clips/edit',
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
name: collection,
clip: meta,
}),
const response = await apiFetch('meta/collection/clips/edit', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
name: collection,
clip: meta,
}),
});
await response.json();
// console.log('handle clip save return:', data.collections);
dispatch({
@ -145,7 +167,11 @@ export default function ClipList({ collection }: ClipListProps) {
}
return (
<div className="min-h-full flex flex-col justify-start bg-midnight text-offwhite">
<div
className="min-h-full flex flex-col justify-start bg-midnight text-offwhite"
onDrop={handleDrop}
onDragOver={handleDragOver}
>
<DndContext
sensors={sensors}
collisionDetection={closestCenter}

View File

@ -0,0 +1,29 @@
import React from 'react';
/**
 * Icon showing two overlapping play arrows — used for the "play overlap"
 * playback mode toggle.
 */
export default function PlayOverlapIcon({
  size = 24,
  color = 'currentColor',
}: {
  size?: number;
  color?: string;
}) {
  return (
    <svg
      width={size}
      height={size}
      viewBox="0 0 32 24"
      fill="none"
      xmlns="http://www.w3.org/2000/svg"
      aria-label="Play/Overlap Icon"
    >
      {/* Filled play arrow */}
      <polygon points="4,4 4,20 16,12" fill={color} />
      {/* Outlined play arrow (underneath and to the right) */}
      <polygon
        points="12,4 12,20 24,12"
        fill="none"
        stroke={color}
        strokeWidth={1}
      />
    </svg>
  );
}

View File

@ -0,0 +1,23 @@
/**
 * Icon showing a play arrow beside a stop square — used for the
 * "play/stop" playback mode toggle.
 */
export default function PlayStopIcon(props: { size?: number; color?: string }) {
  const { size = 24, color = 'currentColor' } = props;
  return (
    <svg
      width={size}
      height={size}
      viewBox="0 0 48 24"
      fill="none"
      xmlns="http://www.w3.org/2000/svg"
      aria-label="Play/Stop Icon"
    >
      {/* Play Arrow */}
      <polygon points="4,4 20,12 4,20" fill={color} />
      {/* Stop Square */}
      <rect x="28" y="4" width="16" height="16" rx="2" fill={color} />
    </svg>
  );
}

View File

@ -1,10 +1,10 @@
import { ElectronHandler, FileHandler } from '../main/preload';
import { ElectronHandler, AudioHandler } from '../main/preload';
declare global {
// eslint-disable-next-line no-unused-vars
interface Window {
electron: ElectronHandler;
audio: FileHandler;
audio: AudioHandler;
}
}

View File

@ -5,3 +5,4 @@ ClipTrimDotNet/bin/
ClipTrimDotNet/obj/
ClipTrimDotNet/dist/
ClipTrimDotNet/node_modules/
.vs/

View File

@ -1,96 +0,0 @@
{
"Version": 1,
"WorkspaceRootPath": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\",
"Documents": [
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\client\\cliptrimclient.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\client\\cliptrimclient.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\player.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\player.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\profileswitcher.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\profileswitcher.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\client\\collectionmetadata.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\client\\collectionmetadata.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
}
],
"DocumentGroupContainers": [
{
"Orientation": 0,
"VerticalTabListWidth": 256,
"DocumentGroups": [
{
"DockedWidth": 297,
"SelectedChildIndex": 2,
"Children": [
{
"$type": "Bookmark",
"Name": "ST:0:0:{57d563b6-44a5-47df-85be-f4199ad6b651}"
},
{
"$type": "Document",
"DocumentIndex": 2,
"Title": "ProfileSwitcher.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\ProfileSwitcher.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\ProfileSwitcher.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\ProfileSwitcher.cs",
"RelativeToolTip": "ClipTrimDotNet\\ProfileSwitcher.cs",
"ViewState": "AgIAAFkAAAAAAAAAAAAlwG8AAABKAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:06:24.045Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 0,
"Title": "ClipTrimClient.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"RelativeToolTip": "ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"ViewState": "AgIAAEgAAAAAAAAAAAAuwGMAAAAJAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:03:49.814Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 3,
"Title": "CollectionMetaData.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"RelativeToolTip": "ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"ViewState": "AgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:03:47.862Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 1,
"Title": "Player.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Player.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\Player.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Player.cs*",
"RelativeToolTip": "ClipTrimDotNet\\Player.cs*",
"ViewState": "AgIAAHIAAAAAAAAAAAA3wIYAAABMAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:00:23.762Z",
"EditorCaption": ""
},
{
"$type": "Bookmark",
"Name": "ST:0:0:{cce594b6-0c39-4442-ba28-10c64ac7e89f}"
}
]
}
]
}
]
}

View File

@ -1,113 +0,0 @@
{
"Version": 1,
"WorkspaceRootPath": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\",
"Documents": [
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\wavplayer.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\wavplayer.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\player.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\player.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\profileswitcher.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\profileswitcher.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\client\\cliptrimclient.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\client\\cliptrimclient.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
},
{
"AbsoluteMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|c:\\users\\mickl\\desktop\\cliptrim-ui\\cliptrimapp\\stream_deck_plugin\\cliptrimdotnet\\client\\collectionmetadata.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}",
"RelativeMoniker": "D:0:0:{4635D874-69C0-4010-BE46-77EF92EB1553}|ClipTrimDotNet\\ClipTrimDotNet.csproj|solutionrelative:cliptrimdotnet\\client\\collectionmetadata.cs||{A6C744A8-0E4A-4FC6-886A-064283054674}"
}
],
"DocumentGroupContainers": [
{
"Orientation": 0,
"VerticalTabListWidth": 256,
"DocumentGroups": [
{
"DockedWidth": 297,
"SelectedChildIndex": 1,
"Children": [
{
"$type": "Bookmark",
"Name": "ST:0:0:{57d563b6-44a5-47df-85be-f4199ad6b651}"
},
{
"$type": "Document",
"DocumentIndex": 0,
"Title": "WavPlayer.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\WavPlayer.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\WavPlayer.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\WavPlayer.cs",
"RelativeToolTip": "ClipTrimDotNet\\WavPlayer.cs",
"ViewState": "AgIAALYAAAAAAAAAAAAAALsAAAANAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:16:26.477Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 2,
"Title": "ProfileSwitcher.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\ProfileSwitcher.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\ProfileSwitcher.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\ProfileSwitcher.cs",
"RelativeToolTip": "ClipTrimDotNet\\ProfileSwitcher.cs",
"ViewState": "AgIAAG8AAAAAAAAAAAAWwG8AAABKAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:06:24.045Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 3,
"Title": "ClipTrimClient.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"RelativeToolTip": "ClipTrimDotNet\\Client\\ClipTrimClient.cs",
"ViewState": "AgIAAEgAAAAAAAAAAAAuwGIAAAApAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:03:49.814Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 4,
"Title": "CollectionMetaData.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"RelativeToolTip": "ClipTrimDotNet\\Client\\CollectionMetaData.cs",
"ViewState": "AgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:03:47.862Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 1,
"Title": "Player.cs",
"DocumentMoniker": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Player.cs",
"RelativeDocumentMoniker": "ClipTrimDotNet\\Player.cs",
"ToolTip": "C:\\Users\\mickl\\Desktop\\cliptrim-ui\\ClipTrimApp\\stream_deck_plugin\\ClipTrimDotNet\\Player.cs",
"RelativeToolTip": "ClipTrimDotNet\\Player.cs",
"ViewState": "AgIAAHoAAAAAAAAAAAAswIwAAAAbAAAAAAAAAA==",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.000738|",
"WhenOpened": "2026-02-21T15:00:23.762Z",
"EditorCaption": ""
},
{
"$type": "Bookmark",
"Name": "ST:0:0:{cce594b6-0c39-4442-ba28-10c64ac7e89f}"
}
]
}
]
}
]
}