// session-screen.js — JK.SessionScreen module (listing: 1003 lines, 40 KiB, JavaScript)
(function(context,$) {
|
|
|
|
"use strict";
|
|
|
|
context.JK = context.JK || {};
|
|
context.JK.SessionScreen = function(app) {
|
|
// Shared module logger.
var logger = context.JK.logger;
// Session data model; created once the current user has finished loading.
var sessionModel = null;
// Id of the session this screen is showing (set in beforeShow).
var sessionId;
// client_id -> JK.SessionTrack for every rendered track.
var tracks = {};
// Cached snapshot of the native mixer channels (refreshed by _updateMixers).
var mixers = [];
// Number of tracks owned by the local user.
var myTrackCount = 0;

// dialog variables
// Listbox backing arrays for the configure dialog, rebuilt by initDialogData().
var unusedAudioInputChannels = [];
var track1AudioInputChannels = [];
var track2AudioInputChannels = [];

var unusedAudioOutputChannels = [];
var usedAudioOutputChannels = [];

var usedChatInputChannels = [];

// Audio devices reported by the native client.
var devices = [];
// Music device active when the configure dialog opened; cancelSettings()
// restores it. NOTE(review): initMusicAudioPanel re-declares this with
// 'var', so this module-level copy may never be assigned — verify.
var original_device_id;

// TODO Consolidate dragged controls and handles
// Drag state for the fader controls (null whenever no drag is in progress).
var $draggingFaderHandle = null;
var $draggingFader = null;
var $draggingVolumeHandle = null;
var $draggingVolume = null;
var $draggingChatHandle = null;
var $draggingChat = null;
// Mixer ids bound to the fader being dragged, plus that mixer's value range.
var currentMixerIds = null;
var currentMixerRangeMin = null;
var currentMixerRangeMax = null;

// Polling state used to match late-arriving mixers to participants
// (see lookForMixers).
var lookingForMixersCount = 0;
var lookingForMixersTimer = null;
var lookingForMixers = {};

// Per-OS help text for the configure-audio tab (keyed by GetOSAsString()).
var configure_audio_instructions = {
    "Win32": "Choose the driver to use for audio and check its settings. Then use arrow " +
        "buttons to assign audio inputs to your tracks, to indicate what instrument you are playing on " +
        "each track, and to assign audio outputs for listening. If you don't see an audio device you think " +
        "should be listed, view this help topic to understand why.",

    "MacOSX": "Use arrow buttons to assign audio inputs to your tracks, to indicate what " +
        "instrument you are playing on each track, and to assign audio outputs for listening. If you don't " +
        "see an audio device you think should be listed, view this help topic to understand why.",

    "Unix": "Use arrow buttons to assign audio inputs to your tracks, to indicate what " +
        "instrument you are playing on each track, and to assign audio outputs for listening. If you don't " +
        "see an audio device you think should be listed, view this help topic to understand why."
};

// Help text for the configure-voice tab.
var configure_voice_instructions = "If you are using a microphone to capture your instrumental or vocal audio, you can simply use that mic " +
    "for both music and chat. Otherwise, choose a device to use for voice chat, and use arrow buttons to " +
    "select an input on that device.";

// Fallback participant template (not referenced in this file's visible code).
var defaultParticipant = {
    tracks: [{
        instrument_id: "unknown"
    }],
    user: {
        first_name: 'Unknown',
        last_name: 'User',
        photo_url: null
    }
};

// Instrument icon map (see JK.getInstrumentIconMap45).
var instrumentIcons = context.JK.getInstrumentIconMap45();

// Recreate ChannelGroupIDs ENUM from C++
var ChannelGroupIds = {
    "MasterGroup": 0,
    "MonitorGroup": 1,
    "AudioInputMusicGroup": 2,
    "AudioInputChatGroup": 3,
    "MediaTrackGroup": 4,
    "StreamOutMusicGroup": 5,
    "StreamOutChatGroup": 6,
    "UserMusicInputGroup": 7,
    "UserChatInputGroup": 8,
    "PeerAudioInputMusicGroup": 9
};
|
|
|
|
// Screen lifecycle hook: remember which session we are entering and clear
// any track UI left over from a previous session.
function beforeShow(data) {
    sessionId = data.id;
    $('#session-mytracks-container, #session-livetracks-container').empty();
}
|
|
|
|
// Screen lifecycle hook: subscribe to bridge callbacks, wait for the current
// user to finish loading, then join the session.
function afterShow(data) {
    // Subscribe for callbacks on audio events.
    context.jamClient.SessionRegisterCallback("JK.HandleBridgeCallback");

    // If this page is loaded directly, the current user loads in parallel and
    // the session cannot be joined until that completes. Poll for it.
    (function checkForCurrentUser() {
        if (!context.JK.userMe) {
            logger.debug("Current user not loaded yet. Waiting...");
            context.setTimeout(checkForCurrentUser, 100);
            return;
        }
        logger.debug("current user loaded. Proceeding to join session.");
        afterCurrentUserLoaded();
    })();

    setAudioInstructions();
}
|
|
|
|
// Build the session model around the server bridge, the native client and the
// now-available current user, then subscribe to changes and join the session.
function afterCurrentUserLoaded() {
    var server = context.JK.JamServer;
    var client = context.jamClient;
    var me = context.JK.userMe;
    sessionModel = new context.JK.SessionModel(server, client, me);
    sessionModel.subscribe('sessionScreen', sessionChanged);
    sessionModel.joinSession(sessionId);
}
|
|
|
|
// Screen lifecycle hook: leave the session and unregister ("" = none) the
// audio bridge callback.
function beforeHide(data) {
    sessionModel.leaveCurrentSession(sessionId);
    context.jamClient.SessionRegisterCallback("");
}
|
|
|
|
// Model-change subscriber: any session change triggers a full re-render.
function sessionChanged() {
    renderSession();
}
|
|
|
|
// Rebuild the entire track UI from the current model and mixer state.
function renderSession() {
    $('#session-mytracks-container, #session-livetracks-container').empty();

    _updateMixers();
    _renderTracks();
    _wireTopVolume();
    _wireTopMix();
    _addVoiceChat();

    // The track-2 configuration widgets only apply with 2+ local tracks.
    toggleTrack2ConfigDetails(myTrackCount > 1);
}
|
|
|
|
// Refresh the cached list of underlying audio mixer channels.
function _updateMixers() {
    var mixerIds = context.jamClient.SessionGetIDs();
    // Deep-copy the control state so later mutations never touch the
    // structure owned by the native bridge.
    var copied = $.extend(true, {}, {mixers: context.jamClient.SessionGetControlState(mixerIds)});
    mixers = copied.mixers;
    // The L2M mix has no real mixer channel, so always append a hard-coded
    // simplified stand-in for it.
    mixers.push({
        id: '__L2M__',
        range_low: -80,
        range_high: 20,
        volume_left: context.jamClient.SessionGetMasterLocalMix()
    });
}
|
|
|
|
// Find the first cached mixer owned by clientId whose group_id is one of
// groupIds; returns null when no such mixer is known yet.
function _mixerForClientId(clientId, groupIds) {
    var foundMixer = null;
    $.each(mixers, function(index, candidate) {
        if (candidate.client_id !== clientId) {
            return;
        }
        for (var g = 0; g < groupIds.length; g++) {
            if (candidate.group_id === groupIds[g]) {
                foundMixer = candidate;
                return false; // stop iterating
            }
        }
    });
    return foundMixer;
}
|
|
|
|
// Bind the master + monitor mixer ids to the top volume slider and position
// its handle from the master mixer's current gain.
function _wireTopVolume() {
    var $volumeSlider = $('#volume');
    var boundIds = [];
    $.each(mixers, function(index, mixer) {
        var isMaster = (mixer.group_id === ChannelGroupIds.MasterGroup);
        var isMonitor = (mixer.group_id === ChannelGroupIds.MonitorGroup);
        if (!isMaster && !isMonitor) {
            return;
        }
        boundIds.push(mixer.id);
        if (isMaster) {
            var gainPercent = percentFromMixerValue(
                mixer.range_low, mixer.range_high, mixer.volume_left);
            $volumeSlider.find('.handle').css('left', gainPercent + '%');
        }
    });
    // mixer-id holds a comma-separated list (master + monitor).
    $volumeSlider.attr('mixer-id', boundIds.join(','));
}
|
|
|
|
/**
 * Position the top L2M (local mix) slider handle. This control has its own
 * Get/Set bridge methods, so it never needs to be matched up with a real
 * mixer later; a fixed -80..20 range mirrors the '__L2M__' pseudo-mixer.
 */
function _wireTopMix() {
    var rangeLow = -80;
    var rangeHigh = 20;
    var current = context.jamClient.SessionGetMasterLocalMix();
    var gainPercent = percentFromMixerValue(rangeLow, rangeHigh, current);
    $('#l2m').find('.handle').css('left', gainPercent + '%');
}
|
|
|
|
// Show and wire the voice-chat strip if (and only if) a mixer exists in the
// AudioInputChatGroup. The assumption is there is only ever one such mixer.
function _addVoiceChat() {
    $.each(mixers, function(index, mixer) {
        if (mixer.group_id !== ChannelGroupIds.AudioInputChatGroup) {
            return;
        }
        var $voiceChat = $('#voice-chat');
        $voiceChat.show();
        $voiceChat.attr('mixer-id', mixer.id);
        $('#voice-chat .voicechat-mute').attr('mixer-id', mixer.id);

        var gainPercent = percentFromMixerValue(
            mixer.range_low, mixer.range_high, mixer.volume_left);
        $voiceChat.find('.voicechat-gain-slider').css({'left': gainPercent + '%'});
        if (mixer.mute) {
            _toggleVisualMuteControl($voiceChat.find('.voicechat-mute'), true);
        }
    });
}
|
|
|
|
// Render one track strip per participant. Participants are available right
// away, but the native mixers lag behind: any participant without a matching
// mixer is remembered in lookingForMixers so the lookForMixers() interval can
// wire its track to the mixer once it appears.
function _renderTracks() {
    lookingForMixersCount = 0;
    // BUGFIX: renderSession() empties the track containers and rebuilds them
    // from scratch, so the local-track count must be rebuilt too; previously
    // it accumulated across renders and broke the myTrackCount-dependent UI.
    myTrackCount = 0;
    $.each(sessionModel.participants(), function(index, participant) {

        var name = participant.user.name;
        if (!(name)) {
            name = participant.user.first_name + ' ' + participant.user.last_name;
        }
        var instrumentIcon = context.JK.getInstrumentIcon45(participant.tracks[0].instrument_id);
        var photoUrl = context.JK.resolveAvatarUrl(participant.user.photo_url);

        var myTrack = false;

        // Default trackData to participant + no mixer state.
        var trackData = {
            clientId: participant.client_id,
            name: name,
            instrumentIcon: instrumentIcon,
            avatar: photoUrl,
            latency: "good",
            gainPercent: 0,
            muteClass: 'muted',
            mixerId: ""
        };

        var mixer = _mixerForClientId(
            participant.client_id,
            [
                ChannelGroupIds.AudioInputMusicGroup,
                ChannelGroupIds.UserMusicInputGroup
            ]);
        if (mixer) {
            // AudioInputMusicGroup mixers belong to the local user.
            myTrack = (mixer.group_id === ChannelGroupIds.AudioInputMusicGroup);

            var gainPercent = percentFromMixerValue(
                mixer.range_low, mixer.range_high, mixer.volume_left);
            var muteClass = "enabled";
            if (mixer.mute) {
                muteClass = "muted";
            }
            trackData.gainPercent = gainPercent;
            trackData.muteClass = muteClass;
            trackData.mixerId = mixer.id;
        } else { // No mixer to match, yet
            lookingForMixers[participant.client_id] = true;
            if (!(lookingForMixersTimer)) {
                lookingForMixersTimer = context.setInterval(lookForMixers, 300);
            }
        }
        _addTrack(trackData);
        // Show settings icons only for my tracks
        if (myTrack) {
            $('div[mixer-id="' + mixer.id + '"].track-icon-settings').show();
            myTrackCount++;
        }
    });
}
|
|
|
|
// Show/hide the track-2 configuration widgets and resize the track-1
// listboxes to reclaim the freed space when track 2 is hidden.
function toggleTrack2ConfigDetails(visible) {
    var $track2Divs = $('#track2-arrows-div, #track2-input-div, #track2-instrument-div');
    var track1Height = visible ? '70px' : '145px';
    if (visible) {
        $track2Divs.show();
    } else {
        $track2Divs.hide();
    }
    $('#track1-input').height(track1Height);
    $('#track1-instrument').height(track1Height);
}
|
|
|
|
// Called on an interval when participants change. Mixers show up later than
// participants, so tracks are rendered from participants first and the ones
// without mixers are kept in lookingForMixers; each tick tries to find their
// mixers and wire the track UI to them. Gives up after 10 attempts.
function lookForMixers() {
    lookingForMixersCount++;
    _updateMixers();
    var keysToDelete = [];
    for (var key in lookingForMixers) {
        var mixer = _mixerForClientId(
            key,
            [
                ChannelGroupIds.AudioInputMusicGroup,
                ChannelGroupIds.UserMusicInputGroup
            ]);
        if (mixer) {
            keysToDelete.push(key);
            var gainPercent = percentFromMixerValue(
                mixer.range_low, mixer.range_high, mixer.volume_left);
            var $track = $('div.track[client-id="' + key + '"]');
            // Set mixer-id attributes (VU meters get _vul/_vur suffixes).
            $track.find('.track-vu-left').attr('mixer-id', mixer.id + "_vul");
            $track.find('.track-vu-right').attr('mixer-id', mixer.id + "_vur");
            $track.find('.track-gain').attr('mixer-id', mixer.id);
            $track.find('.track-icon-mute').attr('mixer-id', mixer.id);
            $track.find('.track-icon-settings').attr('mixer-id', mixer.id);

            // Set gain position
            $track.find('.track-gain-slider').css('bottom', gainPercent + '%');

            // Set mute state
            _toggleVisualMuteControl($track.find('.track-icon-mute'), mixer.mute);
        }
    }

    for (var i = 0; i < keysToDelete.length; i++) {
        delete lookingForMixers[keysToDelete[i]];
    }

    // Stop polling once everything is matched, or after 10 tries.
    if (context.JK.dlen(lookingForMixers) === 0 ||
        lookingForMixersCount > 10) {
        lookingForMixersCount = 0;
        lookingForMixers = {};
        // BUGFIX: this timer is created with setInterval, so it must be
        // cancelled with clearInterval — clearTimeout on an interval id is
        // not guaranteed to stop it.
        context.clearInterval(lookingForMixersTimer);
        lookingForMixersTimer = null;
    }
}
|
|
|
|
// Given a mixerId (carrying a _vul/_vur suffix that selects the channel) and
// a level between 0.0 and 1.0, light the proper number of VU lights.
function _updateVU(mixerId, value) {
    var LIGHT_COUNT = 13;  // lights per meter
    var FIRST_RED = 9;     // lights 9..12 form the red (hot) segment
    var lit = Math.round(LIGHT_COUNT * value);
    var tableSelector = '#tracks table[mixer-id="' + mixerId + '"]';

    // Strip every light class, then re-add exactly one class per light.
    $(tableSelector + ' td.vulight').removeClass('vu-green-off vu-green-on vu-red-off vu-red-on');
    for (var i = 0; i < LIGHT_COUNT; i++) {
        var colorClass = (i >= FIRST_RED) ? 'vu-red-' : 'vu-green-';
        var state = (i >= lit) ? 'off' : 'on';
        $(tableSelector + ' td.vu' + i).addClass(colorClass + state);
    }
}
|
|
|
|
// Render one track strip into the "my tracks" or "live tracks" container,
// wire its settings icon, and register its SessionTrack wrapper.
function _addTrack(trackData) {
    var isMine = (trackData.clientId === app.clientId);
    var $destination = $(isMine ? '#session-mytracks-container'
                                : '#session-livetracks-container');

    // Both VU meters share the same sub-template.
    var vuHtml = $('#template-vu').html();
    trackData["left-vu"] = vuHtml;
    trackData["right-vu"] = vuHtml;

    var newTrack = context.JK.fillTemplate($('#template-session-track').html(), trackData);
    $destination.append(newTrack);

    $('div[mixer-id="' + trackData.mixerId + '"].track-icon-settings').click(function() {
        initMusicAudioPanel();
    });
    tracks[trackData.clientId] = new context.JK.SessionTrack(trackData.clientId);
}
|
|
|
|
// Audio bridge callback. Arguments arrive as a flat variadic list of
// (eventName, mixerId, value) triples; each triple is dispatched in turn.
function handleBridgeCallback() {
    var tupleCount = arguments.length / 3;
    for (var t = 0; t < tupleCount; t++) {
        var eventName = arguments[3 * t];
        var mixerId = arguments[(3 * t) + 1];
        var value = arguments[(3 * t) + 2];

        if (eventName === 'left_vu' || eventName === 'right_vu') {
            // TODO - no guarantee range will be -80 to 20. Get from the
            // GetControlState for this mixer which returns min/max.
            // value is a dB level from -80 to 20; map onto 0.0-1.0.
            var vuVal = (value + 80) / 100;
            var suffix = (eventName === 'left_vu') ? '_vul' : '_vur';
            _updateVU(mixerId + suffix, vuVal);
        } else if (eventName === 'add' || eventName === 'remove') {
            // Intentionally ignored: participant add/remove is already
            // handled via websocket events. These bridge events arrive in
            // bursts; they could later replace the lookForMixers polling.
        } else {
            // Example of another event — media file track: "add", "The_Abyss_4T", 0
            logger.debug('non-vu event: ' + eventName + ',' + mixerId + ',' + value);
        }
    }
}
|
|
|
|
// Click handler for the session "delete" action: DELETE the session on the
// server and return to the home screen on success.
function deleteSession(evt) {
    var sessionId = $(evt.currentTarget).attr("action-id");
    if (!sessionId) {
        return;
    }
    $.ajax({
        type: "DELETE",
        url: "/api/sessions/" + sessionId,
        success: function(response) {
            context.location="#/home";
        },
        error: function(jqXHR, textStatus, errorThrown) {
            logger.error("Error deleting session " + sessionId);
        }
    });
}
|
|
|
|
// Flip a mute control's visual state between 'enabled' and 'muted'.
function _toggleVisualMuteControl($control, muting) {
    $control.toggleClass('muted', muting);
    $control.toggleClass('enabled', !muting);
}
|
|
|
|
// Push a new mute state for one mixer down to the native audio layer.
// fillTrackVolumeObject() primes context.trackVolumeObject with the mixer's
// current control state; only the mute flag is changed before committing the
// whole object via SessionSetControlState.
function _toggleAudioMute(mixerId, muting) {
    fillTrackVolumeObject(mixerId);
    context.trackVolumeObject.mute = muting;
    context.jamClient.SessionSetControlState(mixerId);
}
|
|
|
|
// Click handler for mute buttons. A control currently showing 'enabled' is
// about to be muted; its mixer-id may hold a comma-separated list of ids.
function toggleMute(evt) {
    var $control = $(evt.currentTarget);
    var muting = $control.hasClass('enabled');
    $.each($control.attr('mixer-id').split(','), function(i, mixerId) {
        _toggleAudioMute(mixerId, muting);
    });
    _toggleVisualMuteControl($control, muting);
}
|
|
|
|
// Convenience wrapper: map an absolute mouse Y coordinate onto a 0-100
// position along a vertically-oriented fader.
function getVerticalFaderPercent(eventY, $fader) {
    return getFaderPercent(eventY, $fader, 'vertical');
}
|
|
|
|
// Convenience wrapper: map an absolute mouse X coordinate onto a 0-100
// position along a horizontally-oriented fader.
function getHorizontalFaderPercent(eventX, $fader) {
    return getFaderPercent(eventX, $fader, 'horizontal');
}
|
|
|
|
// Translate an absolute mouse coordinate into a 0-100 position along a
// fader. Vertical faders grow upward (0 at the bottom of the track);
// horizontal faders grow rightward (0 at the left). Any orientation other
// than "horizontal" is treated as vertical. The result is clamped to [0,100].
function getFaderPercent(value, $fader, orientation) {
    var origin = $fader.offset();
    var trackStart;
    var trackLength;
    var distance;
    if (orientation === "horizontal") {
        trackStart = origin.left;
        trackLength = $fader.width();
        distance = value - trackStart;
    } else {
        trackStart = origin.top;
        trackLength = $fader.height();
        distance = trackLength - (value - trackStart);
    }
    var faderPct = Math.round(distance / trackLength * 100);
    return Math.min(100, Math.max(0, faderPct));
}
|
|
|
|
// Copy the control state of the mixer identified by mixerId into the shared
// context.trackVolumeObject (the struct the native bridge reads on
// SessionSetControlState), and stash the mixer's value range into
// currentMixerRangeMin/Max — trackVolumeObject has no slot for the range.
// broadcast defaults to true when omitted.
function fillTrackVolumeObject(mixerId, broadcast) {
    _updateMixers();
    var shouldBroadcast = (broadcast === undefined) ? true : broadcast;
    for (var i = 0; i < mixers.length; i++) {
        var mixer = mixers[i];
        if (mixer.id !== mixerId) {
            continue;
        }
        context.trackVolumeObject.clientID = mixer.client_id;
        context.trackVolumeObject.broadcast = shouldBroadcast;
        context.trackVolumeObject.master = mixer.master;
        context.trackVolumeObject.monitor = mixer.monitor;
        context.trackVolumeObject.mute = mixer.mute;
        context.trackVolumeObject.name = mixer.name;
        context.trackVolumeObject.record = mixer.record;
        context.trackVolumeObject.volL = mixer.volume_left;
        context.trackVolumeObject.volR = mixer.volume_right;
        currentMixerRangeMin = mixer.range_low;
        currentMixerRangeMax = mixer.range_high;
        break;
    }
}
|
|
|
|
// Given a mixer's min/max range and a current value, return the value's
// position in the range as an integer percent 0-100.
function percentFromMixerValue(min, max, value) {
    var range = Math.abs(max - min);
    if (range === 0) {
        // BUGFIX: a degenerate range previously produced NaN (divide by
        // zero), which leaked into CSS handle positions.
        return 0;
    }
    var magnitude = value - min;
    return Math.round(100 * (magnitude / range));
}
|
|
|
|
// Inverse of percentFromMixerValue: map a percent 0-100 onto the mixer's
// min..max range. Percents outside 0-100 are clamped to the range bounds.
function percentToMixerValue(min, max, percent) {
    var span = Math.abs(max - min);
    var raw = min + (percent / 100) * span;
    // Protect against percents < 0 and > 100.
    return Math.min(max, Math.max(min, raw));
}
|
|
|
|
// Apply a 0-100 volume percent to the given mixer. Assumes
// fillTrackVolumeObject(mixerId) already primed context.trackVolumeObject and
// currentMixerRangeMin/Max for this mixer: the percent is translated onto the
// mixer's native range, set on both channels, then committed via the bridge.
function setMixerVolume(mixerId, volumePercent) {
    var sliderValue = percentToMixerValue(
        currentMixerRangeMin, currentMixerRangeMax, volumePercent);
    context.trackVolumeObject.volL = sliderValue;
    context.trackVolumeObject.volR = sliderValue;
    if (mixerId === '__L2M__') {
        // The L2M pseudo-mixer has its own dedicated bridge setter.
        context.jamClient.SessionSetMasterLocalMix(sliderValue);
    } else {
        context.jamClient.SessionSetControlState(mixerId);
    }
}
|
|
|
|
|
|
// TODO Refactor. Only need one of these xxxDown methods.
// mousedown on a fader handle: begin a drag and prime the volume object for
// every mixer bound to the fader (mixer-id may be a comma-separated list).
function faderHandleDown(evt) {
    evt.stopPropagation();
    $draggingFaderHandle = $(evt.currentTarget);
    $draggingFader = $draggingFaderHandle.closest('div[control="fader"]');
    currentMixerIds = $draggingFader.closest('[mixer-id]').attr('mixer-id').split(',');
    $.each(currentMixerIds, function(i, mixerId) {
        fillTrackVolumeObject(mixerId);
    });
    return false;
}
|
|
|
|
// mouseup anywhere: finish any in-progress fader drag.
function faderMouseUp(evt) {
    evt.stopPropagation();
    if ($draggingFaderHandle) {
        $draggingFaderHandle = null;
        $draggingFader = null;
        currentMixerIds = null;
    }
    return false;
}
|
|
|
|
// mousemove during a fader drag: translate the pointer position into a
// percent, push it to every mixer bound to the fader, and move the handle.
function faderMouseMove(evt) {
    // Bail out early when there is no in-progress drag.
    if (!$draggingFaderHandle) {
        return false;
    }
    evt.stopPropagation();

    var horizontal = ($draggingFader.attr('orientation') === 'horizontal');
    var faderPct = horizontal
        ? getHorizontalFaderPercent(evt.clientX, $draggingFader)
        : getVerticalFaderPercent(evt.clientY, $draggingFader);

    $.each(currentMixerIds, function(i, mixerId) {
        setMixerVolume(mixerId, faderPct);
    });

    // The handle graphic is capped at 90% so it stays inside the track.
    $draggingFaderHandle.css(horizontal ? 'left' : 'bottom',
        Math.min(faderPct, 90) + '%');
    return false;
}
|
|
|
|
// Click directly on a fader track: jump the volume (and the handle) to the
// clicked position. Ignored while a drag is in flight.
function faderClick(evt) {
    evt.stopPropagation();
    if ($draggingFaderHandle) {
        return;
    }
    var $fader = $(evt.currentTarget);

    // Switch math and CSS axis based on fader orientation.
    var horizontal = ($fader.attr('orientation') === 'horizontal');
    var faderPct = horizontal
        ? getHorizontalFaderPercent(evt.clientX, $fader)
        : getVerticalFaderPercent(evt.clientY, $fader);

    // mixerIds can be a comma-separated list
    var mixerIds = $fader.closest('[mixer-id]').attr('mixer-id').split(',');
    $.each(mixerIds, function(i, mixerId) {
        fillTrackVolumeObject(mixerId);
        setMixerVolume(mixerId, faderPct);
    });

    var $handle = $fader.find('div[control="fader-handle"]');
    // The handle graphic is capped at 90% so it stays inside the track.
    $handle.css(horizontal ? 'left' : 'bottom', Math.min(faderPct, 90) + '%');
    return false;
}
|
|
|
|
// One-time DOM event wiring for the session screen and its configure dialog.
function events() {
    $('#session-contents').on("click", '[action="delete"]', deleteSession);
    $('#tracks').on('click', 'div[control="mute"]', toggleMute);

    // Fader wiring. click/mousedown are delegated per container because each
    // container uses its own markup; move/up are bound ONCE on <body> so a
    // drag keeps tracking after the pointer leaves the fader.
    // BUGFIX: mousemove/mouseup were previously also bound on #tracks and
    // #voice-chat and bound repeatedly on body, so a single pointer event
    // dispatched the same handler several times (pushing redundant volume
    // updates to the audio bridge during drags).
    $('#tracks').on('click', 'div[control="fader"]', faderClick);
    $('#tracks').on('mousedown', 'div[control="fader-handle"]', faderHandleDown);

    $('#session-controls .fader').on('click', 'div[control="fader"]', faderClick);
    $('#session-controls .fader').on('mousedown', '.handle', faderHandleDown);

    // Voice-chat faders are wired up front even though the strip stays hidden
    // (and can't fire events) unless the user has a voice chat mixer.
    $('#voice-chat').on('click', 'div[control="fader"]', faderClick);
    $('#voice-chat').on('mousedown', 'div[control="fader-handle"]', faderHandleDown);

    $('body').on('mousemove', faderMouseMove);
    $('body').on('mouseup', faderMouseUp);

    $('#tab-configure-audio').click(function() {
        // TODO validate voice chat settings first (validateVoiceChatSettings()).
        setAudioInstructions();
        initMusicAudioPanel();
    });

    $('#tab-configure-voice').click(function() {
        // TODO validate audio settings first (validateAudioSettings()).
        initVoiceChatPanel();
        $('#instructions', 'div[layout-id="configure-audio"]').html(configure_voice_instructions);
    });

    // Arrow buttons move <option>s between the "unused" pool and the
    // track/output assignment listboxes.

    // Track 1 Add
    $('#img-track1-input-add').click(function() {
        $('#audio-inputs-unused option:selected').remove().appendTo('#track1-input');
    });

    // Track 1 Remove
    $('#img-track1-input-remove').click(function() {
        $('#track1-input option:selected').remove().appendTo('#audio-inputs-unused');
    });

    // Track 2 Add
    $('#img-track2-input-add').click(function() {
        $('#audio-inputs-unused option:selected').remove().appendTo('#track2-input');
    });

    // Track 2 Remove
    $('#img-track2-input-remove').click(function() {
        $('#track2-input option:selected').remove().appendTo('#audio-inputs-unused');
    });

    // Audio Output Add
    $('#img-audio-output-add').click(function() {
        $('#audio-output-unused option:selected').remove().appendTo('#audio-output-selection');
    });

    // Audio Output Remove
    $('#img-audio-output-remove').click(function() {
        $('#audio-output-selection option:selected').remove().appendTo('#audio-output-unused');
    });

    $('.voicechat-settings').click(function() {
        initVoiceChatPanel();
    });

    $('#audio-drivers').change(function() {
        audioDriverChanged();
    });

    $('#btn-driver-settings').click(function() {
        logger.debug("Opening control panel...");
        context.jamClient.TrackOpenControlPanel();
    });

    $('#btn-save-settings').click(saveSettings);

    $('#btn-cancel-settings').click(cancelSettings);

    $('#btn-leave-session-test').click(function() {
        $('div[layout-id="configure-audio"]').hide();
    });
}
|
|
|
|
// The Audio Driver dropdown changed: switch the native music device, then
// show the DRIVER SETTINGS button only when the new driver exposes its own
// control panel.
function audioDriverChanged() {
    context.jamClient.TrackSetMusicDevice($('#audio-drivers').val());

    if (context.jamClient.TrackHasControlPanel()) {
        logger.debug("Showing DRIVER SETTINGS button...");
        $('#btn-driver-settings').show();
    } else {
        logger.debug("Hiding DRIVER SETTINGS button...");
        $('#btn-driver-settings').hide();
    }
}
|
|
|
|
// Show the music-audio tab of the configure dialog and (re)build all of its
// controls from the current native-client state.
function initMusicAudioPanel() {

    $('div[tab-id="music-audio"]').show();
    $('div[tab-id="voice-chat"]').hide();

    $('#tab-configure-audio').addClass('selected');
    $('#tab-configure-voice').removeClass('selected');

    $('#audio-drivers').empty();

    // Remember the active music device so cancelSettings() can restore it.
    // BUGFIX: this assignment previously used 'var', shadowing the
    // module-level original_device_id and leaving cancelSettings() to
    // restore 'undefined'.
    original_device_id = context.jamClient.TrackGetMusicDeviceID();
    var current_device_id = original_device_id;

    // Load the Audio Driver dropdown, preselecting the current device.
    devices = context.jamClient.TrackGetDevices();
    var keys = Object.keys(devices);

    for (var i = 0; i < keys.length; i++) {
        var template = $('#template-option').html();
        var isSelected = "";
        if (keys[i] === current_device_id) {
            isSelected = "selected";
        }
        var html = context.JK.fillTemplate(template, {
            value: keys[i],
            label: devices[keys[i]],
            selected: isSelected
        });

        $('#audio-drivers').append(html);
    }

    $('#audio-inputs-unused').empty();
    $('#track1-input').empty();
    $('#track1-instrument').empty();
    $('#track2-input').empty();
    $('#track2-instrument').empty();
    $('#audio-output-unused').empty();
    $('#audio-output-selection').empty();

    // Classify every channel into the module-level listbox arrays.
    initDialogData();

    // load Unused Inputs
    loadOptions($('#audio-inputs-unused'), unusedAudioInputChannels, "device_id", "name", -1);

    // load Track 1 Input(s)
    loadOptions($('#track1-input'), track1AudioInputChannels, "device_id", "name", -1);

    // load Track 1 Instrument
    var instrument = context.jamClient.TrackGetInstrument(1);
    // TODO: use map of integer to instrument to get appropriate instrument
    //loadOptions($('#track1-instrument', [instrument], "", "", -1));

    // load Track 2 config details if necessary
    if (myTrackCount > 1) {
        // load Track 2 Input(s)
        loadOptions($('#track2-input'), track2AudioInputChannels, "device_id", "name", -1);

        // load Track 2 Instrument
        instrument = context.jamClient.TrackGetInstrument(2);
        // TODO: use map of integer to instrument to get appropriate instrument
        // loadOptions($('#track1-instrument', [instrument], "", "", -1));
    }

    // load Unused Outputs
    loadOptions($('#audio-output-unused'), unusedAudioOutputChannels, "device_id", "name", -1);

    // load Session Audio Output
    loadOptions($('#audio-output-selection'), usedAudioOutputChannels, "device_id", "name", -1);
}
|
|
|
|
// Show the voice-chat tab of the configure dialog and rebuild its listboxes.
function initVoiceChatPanel() {
    $('div[tab-id="music-audio"]').hide();
    $('div[tab-id="voice-chat"]').show();

    $('#tab-configure-audio').removeClass('selected');
    $('#tab-configure-voice').addClass('selected');

    // TODO: add logic for Voice Chat Device dropdown change event

    $('#voice-inputs-unused').empty();
    $('#voice-inputs-selection').empty();

    initDialogData();

    // Voice chat inputs: the unused pool plus the currently-assigned input.
    loadOptions($('#voice-inputs-unused'), unusedAudioInputChannels, "device_id", "name", -1);
    loadOptions($('#voice-inputs-selection'), usedChatInputChannels, "device_id", "name", -1);
}
|
|
|
|
// Rebuild the configure-dialog listbox arrays by classifying every channel
// reported by the native client according to its current track assignment.
function initDialogData() {

    // clear out arrays
    unusedAudioInputChannels = [];
    track1AudioInputChannels = [];
    track2AudioInputChannels = [];
    unusedAudioOutputChannels = [];
    usedAudioOutputChannels = [];
    usedChatInputChannels = [];

    // get data needed for listboxes
    var channels = context.jamClient.TrackGetChannels();

    /* Assignment codes:
       -2 = chat
       -1 = speakers
        0 = unassigned
       >0 = the music input track number */

    $.each(channels, function(index, val) {
        var assignment = context.jamClient.TrackGetAssignment(val.id, val.input);
        logger.debug("channel id=" + val.id + ", channel input=" + val.input + ", channel assignment=" + assignment +
            ", channel name=" + val.name + ", channel type=" + val.device_type);

        // INPUT
        // Only channels on the music device type are classified; all others
        // are skipped entirely.
        if (context.jamClient.TrackIsMusicDeviceType(val.device_type)) {
            if (val.input) {
                if (assignment === -2) {
                    usedChatInputChannels.push(val);
                }
                else if (assignment === 0) {
                    unusedAudioInputChannels.push(val);
                }
                else if (assignment === -1) {
                    // Inputs assigned to 'speakers' are deliberately ignored.
                }
                else {
                    if (assignment === 1) {
                        track1AudioInputChannels.push(val);
                    }
                    else {
                        // Any other positive track number lands on track 2.
                        track2AudioInputChannels.push(val);
                    }
                }
            }

            // OUTPUT
            else {
                if (assignment === -2) {
                    // Outputs assigned to chat are ignored here.
                }
                else if (assignment === 0) {
                    unusedAudioOutputChannels.push(val);
                }
                else if (assignment === -1) {
                    // NOTE(review): speaker-assigned outputs fall through
                    // without being listed anywhere — confirm intended.
                }
                else {
                    usedAudioOutputChannels.push(val);
                }
            }
        }
    });
}
|
|
|
|
// Append one <option> per element of input_array to the given listbox (a
// jQuery object), marking the entry whose id_field equals selected_id as
// selected.
function loadOptions(listbox_id, input_array, id_field, text_field, selected_id) {
    // The option template is invariant; fetch it once.
    var template = $('#template-option').html();
    $.each(input_array, function(index, entry) {
        var html = context.JK.fillTemplate(template, {
            value: entry[id_field],
            label: entry[text_field],
            selected: (entry[id_field] === selected_id) ? "selected" : ""
        });
        listbox_id.append(html);
    });
}
|
|
|
|
// Save handler for the configure dialog. Currently only runs the (stub)
// validations; pushing the assignments down to the native layer is TODO.
function saveSettings() {
    if (!validateAudioSettings() || !validateVoiceChatSettings()) {
        return false;
    }

    // TODO:
    //   TrackSetAssignment(const QString& id, bool input, int assignment)
    //   TrackSetInstrument(int track, int instrument_id)
    //   TrackSaveAssignments()
    //   update jam-db with settings
    //   reload / refresh Session screen (instrument icons at minimum)
}
|
|
|
|
// Cancel handler for the configure dialog: restore the music device that was
// active when the dialog opened.
// NOTE(review): original_device_id here is the module-level variable, which
// initMusicAudioPanel appears to shadow with a local 'var' — so this may
// pass undefined to the bridge; verify.
function cancelSettings() {
    // reset to original device ID
    context.jamClient.TrackSetMusicDevice(original_device_id);
}
|
|
|
|
// Placeholder validation for the music-audio tab; always passes for now.
function validateAudioSettings() {
    // verify Track 1 Input and Instrument exist

    // verify Session Audio Output exists
    return true;
}
|
|
|
|
// Placeholder validation for the voice-chat tab; always passes for now.
function validateVoiceChatSettings() {
    return true;
}
|
|
|
|
// Fill the configure-audio instructions block with the help text matching the
// host operating system ("Win32" | "MacOSX" | "Unix").
function setAudioInstructions() {
    var osName = context.jamClient.GetOSAsString();
    $('#instructions', 'div[layout-id="configure-audio"]')
        .html(configure_audio_instructions[osName]);
}
|
|
|
|
// Public entry point: set the native VU refresh rate, wire DOM events once,
// and register the screen lifecycle hooks with the app router.
this.initialize = function() {
    context.jamClient.SetVURefreshRate(150);
    events();
    var screenBindings = {
        'beforeShow': beforeShow,
        'afterShow': afterShow,
        'beforeHide': beforeHide
    };
    app.bindScreen('session', screenBindings);
};

// Exposed for other modules: client_id -> SessionTrack map.
this.tracks = tracks;

// Global hook the native bridge invokes (see SessionRegisterCallback in
// afterShow, which registers the name "JK.HandleBridgeCallback").
context.JK.HandleBridgeCallback = handleBridgeCallback;
|
|
|
|
};
|
|
|
|
})(window,jQuery); |