mirror of
https://github.com/gyurix1968/guacamole-client.git
synced 2025-09-06 13:17:41 +00:00
Merge pull request #272 from glyptodon/new-media-streams
GUAC-1354: Migrate to new streaming mechanism.
This commit is contained in:
@@ -1,291 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2015 Glyptodon LLC
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in
|
|
||||||
* all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
* THE SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
var Guacamole = Guacamole || {};
|
|
||||||
|
|
||||||
/**
 * Abstract audio channel which queues and plays arbitrary audio data.
 *
 * @constructor
 */
Guacamole.AudioChannel = function AudioChannel() {

    /**
     * Reference to this AudioChannel.
     *
     * @private
     * @type Guacamole.AudioChannel
     */
    var self = this;

    /**
     * The earliest possible time that the next packet could play without
     * overlapping an already-playing packet, in milliseconds.
     *
     * @private
     * @type Number
     */
    var nextPacketTime = Guacamole.AudioChannel.getTimestamp();

    /**
     * The last time that sync() was called, in milliseconds. If sync() has
     * never been called, this will be the time the Guacamole.AudioChannel
     * was created.
     *
     * @type Number
     */
    var lastSync = nextPacketTime;

    /**
     * Notifies this Guacamole.AudioChannel that all audio up to the current
     * point in time has been given via play(), and that any difference in time
     * between queued audio packets and the current time can be considered
     * latency.
     */
    this.sync = function sync() {

        // The interval between syncs bounds how much latency is tolerated
        var now = Guacamole.AudioChannel.getTimestamp();

        // Pull future playback back such that accumulated latency never
        // exceeds the time elapsed since the previous sync
        nextPacketTime = Math.min(nextPacketTime, now + (now - lastSync));

        // Remember when this sync occurred
        lastSync = now;

    };

    /**
     * Queues up the given data for playing by this channel once all previously
     * queued data has been played. If no data has been queued, the data will
     * play immediately.
     *
     * @param {String} mimetype
     *     The mimetype of the audio data provided.
     *
     * @param {Number} duration
     *     The duration of the data provided, in milliseconds.
     *
     * @param {Blob} data
     *     The blob of audio data to play.
     */
    this.play = function play(mimetype, duration, data) {

        var packet = new Guacamole.AudioChannel.Packet(mimetype, data);

        // A packet can never be scheduled earlier than the present
        nextPacketTime = Math.max(nextPacketTime, Guacamole.AudioChannel.getTimestamp());

        // Schedule the packet, then advance the timeline past it
        packet.play(nextPacketTime);
        nextPacketTime += duration;

    };

};
|
|
||||||
|
|
||||||
// Define the shared audio context used for playback scheduling, if the Web
// Audio API is available in any form
(function defineAudioContext() {

    // Prefer the standard AudioContext implementation, falling back to the
    // Webkit-prefixed variant
    var AudioContextType = window.AudioContext || window.webkitAudioContext;

    // Attempt context creation only if some implementation exists; leave
    // Guacamole.AudioChannel.context undefined if creation is refused
    if (AudioContextType) {
        try {
            Guacamole.AudioChannel.context = new AudioContextType();
        }
        catch (e) {
            // Browser disallowed context creation - fall back to <audio>
        }
    }

})();
|
|
||||||
|
|
||||||
/**
 * Returns a base timestamp which can be used for scheduling future audio
 * playback. Scheduling playback for the value returned by this function plus
 * N will cause the associated audio to be played back N milliseconds after
 * the function is called.
 *
 * @return {Number} An arbitrary channel-relative timestamp, in milliseconds.
 */
Guacamole.AudioChannel.getTimestamp = function() {

    // Prefer the audio context's own clock, converted from seconds to
    // milliseconds
    if (Guacamole.AudioChannel.context)
        return Guacamole.AudioChannel.context.currentTime * 1000;

    // Otherwise use a high-resolution timer, standard or Webkit-prefixed,
    // where one is exposed
    var performance = window.performance;
    if (performance) {

        if (performance.now)
            return performance.now();

        if (performance.webkitNow)
            return performance.webkitNow();

    }

    // Last resort: millisecond-resolution wall-clock time
    return new Date().getTime();

};
|
|
||||||
|
|
||||||
/**
 * Abstract representation of an audio packet.
 *
 * @constructor
 *
 * @param {String} mimetype The mimetype of the data contained by this packet.
 * @param {Blob} data The blob of sound data contained by this packet.
 */
Guacamole.AudioChannel.Packet = function(mimetype, data) {

    /**
     * Schedules this packet for playback at the given time.
     *
     * @function
     * @param {Number} when The time this packet should be played, in
     *                      milliseconds.
     */
    this.play = function(when) { /* NOP */ }; // Replaced below depending on browser support

    // Use the Web Audio API wherever an audio context could be created
    if (Guacamole.AudioChannel.context) {

        // The decoded audio, or null while decoding is still in progress
        var decodedBuffer = null;

        // Until playback is requested, decoded audio is simply stored for
        // later use. NOTE: the decode callback below reads this variable at
        // call time (rather than capturing its value) precisely so that
        // play() may swap in a different handler before decoding completes.
        var onDecoded = function(buffer) {
            decodedBuffer = buffer;
        };

        // Asynchronously read and decode the provided audio data
        var reader = new FileReader();
        reader.onload = function() {
            Guacamole.AudioChannel.context.decodeAudioData(
                reader.result,
                function(buffer) { onDecoded(buffer); }
            );
        };
        reader.readAsArrayBuffer(data);

        // Prepare a source node wired directly to the context output
        var source = Guacamole.AudioChannel.context.createBufferSource();
        source.connect(Guacamole.AudioChannel.context.destination);

        // Older Web Audio implementations expose noteOn() rather than start()
        if (!source.start)
            source.start = source.noteOn;

        // The time at which playback was requested, in milliseconds
        var scheduledTime;

        // Assigns the given decoded buffer to the source node and schedules
        // playback at the previously-requested time (converted to seconds)
        function startPlayback(buffer) {
            source.buffer = buffer;
            source.start(scheduledTime / 1000);
        }

        /** @ignore */
        this.play = function(when) {

            scheduledTime = when;

            // Play immediately if decoding has already finished
            if (decodedBuffer)
                startPlayback(decodedBuffer);

            // Otherwise, begin playback the moment decoding completes
            else
                onDecoded = startPlayback;

        };

    }

    // Without the Web Audio API, fall back to an <audio> element
    else {

        // Whether playback was requested before the data finished loading
        var pendingPlay = false;

        // The <audio> element housing the sound data, if one can be created
        var audio = null;
        try { audio = new Audio(); }
        catch (e) {}

        if (audio) {

            // Asynchronously read the audio data, exposing it to the <audio>
            // element as a base64-encoded data URI
            var reader = new FileReader();
            reader.onload = function() {

                // Produce a binary string from the bytes in the buffer
                var binary = "";
                var bytes = new Uint8Array(reader.result);
                for (var i = 0; i < bytes.byteLength; i++)
                    binary += String.fromCharCode(bytes[i]);

                // Convert to data URI
                audio.src = "data:" + mimetype + ";base64," + window.btoa(binary);

                // Begin playback now if it was requested prior to load
                if (pendingPlay)
                    audio.play();

            };
            reader.readAsArrayBuffer(data);

            // Plays the audio data immediately if loaded, or as soon as it
            // finishes loading otherwise
            function playWhenReady() {

                // If audio data is ready, play now
                if (audio.src)
                    audio.play();

                // Otherwise, play upon load
                else
                    pendingPlay = true;

            }

            /** @ignore */
            this.play = function(when) {

                // Determine how long to wait prior to playback
                var delay = when - Guacamole.AudioChannel.getTimestamp();

                // Play immediately if the requested time has already passed
                if (delay < 0)
                    playWhenReady();

                // Otherwise, defer playback until the scheduled time arrives
                else
                    window.setTimeout(playWhenReady, delay);

            };

        }

    }

};
|
|
652
guacamole-common-js/src/main/webapp/modules/AudioPlayer.js
Normal file
652
guacamole-common-js/src/main/webapp/modules/AudioPlayer.js
Normal file
@@ -0,0 +1,652 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2015 Glyptodon LLC
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
* of this software and associated documentation files (the "Software"), to deal
|
||||||
|
* in the Software without restriction, including without limitation the rights
|
||||||
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
* copies of the Software, and to permit persons to whom the Software is
|
||||||
|
* furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included in
|
||||||
|
* all copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
* THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var Guacamole = Guacamole || {};
|
||||||
|
|
||||||
|
/**
 * Abstract audio player which accepts, queues and plays back arbitrary audio
 * data. It is up to implementations of this class to provide some means of
 * handling a provided Guacamole.InputStream. Data received along the provided
 * stream is to be played back immediately.
 *
 * @constructor
 */
Guacamole.AudioPlayer = function AudioPlayer() {

    /**
     * Notifies this Guacamole.AudioPlayer that all audio up to the current
     * point in time has been given via the underlying stream, and that any
     * difference in time between queued audio data and the current time can be
     * considered latency.
     */
    this.sync = function sync() {
        // Base implementation is a no-op; implementations override as needed
    };

};
|
||||||
|
|
||||||
|
/**
 * Determines whether the given mimetype is supported by any built-in
 * implementation of Guacamole.AudioPlayer, and thus will be properly handled
 * by Guacamole.AudioPlayer.getInstance().
 *
 * @param {String} mimetype
 *     The mimetype to check.
 *
 * @returns {Boolean}
 *     true if the given mimetype is supported by any built-in
 *     Guacamole.AudioPlayer, false otherwise.
 */
Guacamole.AudioPlayer.isSupportedType = function isSupportedType(mimetype) {

    // Raw PCM playback is currently the only built-in implementation, so its
    // support dictates overall support
    return Guacamole.RawAudioPlayer.isSupportedType(mimetype);

};
|
||||||
|
|
||||||
|
/**
 * Returns a list of all mimetypes supported by any built-in
 * Guacamole.AudioPlayer, in rough order of priority. Beware that only the core
 * mimetypes themselves will be listed. Any mimetype parameters, even required
 * ones, will not be included in the list. For example, "audio/L8" is a
 * supported raw audio mimetype that is supported, but it is invalid without
 * additional parameters. Something like "audio/L8;rate=44100" would be valid,
 * however (see https://tools.ietf.org/html/rfc4856).
 *
 * @returns {String[]}
 *     A list of all mimetypes supported by any built-in Guacamole.AudioPlayer,
 *     excluding any parameters.
 */
Guacamole.AudioPlayer.getSupportedTypes = function getSupportedTypes() {

    // Only raw PCM playback is built in; delegate to that implementation
    return Guacamole.RawAudioPlayer.getSupportedTypes();

};
|
||||||
|
|
||||||
|
/**
 * Returns an instance of Guacamole.AudioPlayer providing support for the given
 * audio format. If support for the given audio format is not available, null
 * is returned.
 *
 * @param {Guacamole.InputStream} stream
 *     The Guacamole.InputStream to read audio data from.
 *
 * @param {String} mimetype
 *     The mimetype of the audio data in the provided stream.
 *
 * @return {Guacamole.AudioPlayer}
 *     A Guacamole.AudioPlayer instance supporting the given mimetype and
 *     reading from the given stream, or null if support for the given mimetype
 *     is absent.
 */
Guacamole.AudioPlayer.getInstance = function getInstance(stream, mimetype) {

    // Use the raw PCM player when it can handle the given mimetype; no other
    // built-in implementations currently exist
    return Guacamole.RawAudioPlayer.isSupportedType(mimetype)
         ? new Guacamole.RawAudioPlayer(stream, mimetype)
         : null;

};
|
||||||
|
|
||||||
|
/**
 * Implementation of Guacamole.AudioPlayer providing support for raw PCM format
 * audio. This player relies only on the Web Audio API and does not require any
 * browser-level support for its audio formats.
 *
 * @constructor
 * @augments Guacamole.AudioPlayer
 * @param {Guacamole.InputStream} stream
 *     The Guacamole.InputStream to read audio data from.
 *
 * @param {String} mimetype
 *     The mimetype of the audio data in the provided stream, which must be a
 *     "audio/L8" or "audio/L16" mimetype with necessary parameters, such as:
 *     "audio/L16;rate=44100,channels=2".
 */
Guacamole.RawAudioPlayer = function RawAudioPlayer(stream, mimetype) {

    /**
     * The format of audio this player will decode.
     *
     * @private
     * @type Guacamole.RawAudioPlayer._Format
     */
    var format = Guacamole.RawAudioPlayer._Format.parse(mimetype);

    /**
     * An instance of a Web Audio API AudioContext object, or null if the
     * Web Audio API is not supported.
     *
     * @private
     * @type AudioContext
     */
    var context = (function getAudioContext() {

        // Use the standard AudioContext, falling back to the Webkit-specific
        // implementation
        var AudioContext = window.AudioContext || window.webkitAudioContext;

        // Get new AudioContext instance if Web Audio API is supported
        if (AudioContext) {
            try {
                return new AudioContext();
            }
            catch (e) {
                // Do not use Web Audio API if not allowed by browser
            }
        }

        // Web Audio API not supported
        return null;

    })();

    /**
     * The earliest possible time that the next packet could play without
     * overlapping an already-playing packet, in seconds. Note that while this
     * value is in seconds, it is not an integer value and has microsecond
     * resolution.
     *
     * NOTE(review): this throws if context is null (Web Audio API absent).
     * Presumably construction is guarded by isSupportedType() such that this
     * cannot occur - confirm against callers.
     *
     * @private
     * @type Number
     */
    var nextPacketTime = context.currentTime;

    /**
     * Guacamole.ArrayBufferReader wrapped around the audio input stream
     * provided with this Guacamole.RawAudioPlayer was created.
     *
     * @private
     * @type Guacamole.ArrayBufferReader
     */
    var reader = new Guacamole.ArrayBufferReader(stream);

    /**
     * The minimum size of an audio packet split by splitAudioPacket(), in
     * seconds. Audio packets smaller than this will not be split, nor will the
     * split result of a larger packet ever be smaller in size than this
     * minimum.
     *
     * @private
     * @constant
     * @type Number
     */
    var MIN_SPLIT_SIZE = 0.02;

    /**
     * The maximum amount of latency to allow between the buffered data stream
     * and the playback position, in seconds. Initially, this is set to
     * roughly one third of a second.
     *
     * @private
     * @type Number
     */
    var maxLatency = 0.3;

    /**
     * The type of typed array that will be used to represent each audio packet
     * internally. This will be either Int8Array or Int16Array, depending on
     * whether the raw audio format is 8-bit or 16-bit.
     *
     * @private
     * @constructor
     */
    var SampleArray = (format.bytesPerSample === 1) ? window.Int8Array : window.Int16Array;

    /**
     * The maximum absolute value of any sample within a raw audio packet
     * received by this audio player. This depends only on the size of each
     * sample, and will be 128 for 8-bit audio and 32768 for 16-bit audio.
     *
     * @private
     * @type Number
     */
    var maxSampleValue = (format.bytesPerSample === 1) ? 128 : 32768;

    /**
     * The queue of all pending audio packets, as an array of sample arrays.
     * Audio packets which are pending playback will be added to this queue for
     * further manipulation prior to scheduling via the Web Audio API. Once an
     * audio packet leaves this queue and is scheduled via the Web Audio API,
     * no further modifications can be made to that packet.
     *
     * @private
     * @type SampleArray[]
     */
    var packetQueue = [];

    /**
     * Given an array of audio packets, returns a single audio packet
     * containing the concatenation of those packets.
     *
     * @private
     * @param {SampleArray[]} packets
     *     The array of audio packets to concatenate.
     *
     * @returns {SampleArray}
     *     A single audio packet containing the concatenation of all given
     *     audio packets. If no packets are provided, this will be undefined.
     */
    var joinAudioPackets = function joinAudioPackets(packets) {

        // Do not bother joining if one or fewer packets are in the queue
        if (packets.length <= 1)
            return packets[0];

        // Determine total sample length of the entire queue
        var totalLength = 0;
        packets.forEach(function addPacketLengths(packet) {
            totalLength += packet.length;
        });

        // Append each packet within queue
        var offset = 0;
        var joined = new SampleArray(totalLength);
        packets.forEach(function appendPacket(packet) {
            joined.set(packet, offset);
            offset += packet.length;
        });

        return joined;

    };

    /**
     * Given a single packet of audio data, splits off an arbitrary length of
     * audio data from the beginning of that packet, returning the split result
     * as an array of two packets. The split location is determined through an
     * algorithm intended to minimize the likelihood of audible clicking between
     * packets. If no such split location is possible, an array containing only
     * the originally-provided audio packet is returned.
     *
     * @private
     * @param {SampleArray} data
     *     The audio packet to split.
     *
     * @returns {SampleArray[]}
     *     An array of audio packets containing the result of splitting the
     *     provided audio packet. If splitting is possible, this array will
     *     contain two packets. If splitting is not possible, this array will
     *     contain only the originally-provided packet.
     */
    var splitAudioPacket = function splitAudioPacket(data) {

        var minValue = Number.MAX_VALUE;
        var optimalSplitLength = data.length;

        // Calculate number of whole samples in the provided audio packet AND
        // in the minimum possible split packet
        var samples = Math.floor(data.length / format.channels);
        var minSplitSamples = Math.floor(format.rate * MIN_SPLIT_SIZE);

        // Calculate the beginning of the "end" of the audio packet
        var start = Math.max(
            format.channels * minSplitSamples,
            format.channels * (samples - minSplitSamples)
        );

        // For all samples at the end of the given packet, find a point where
        // the perceptible volume across all channels is lowest (and thus is
        // the optimal point to split)
        for (var offset = start; offset < data.length; offset += format.channels) {

            // Calculate the sum of all values across all channels (the result
            // will be proportional to the average volume of a sample)
            var totalValue = 0;
            for (var channel = 0; channel < format.channels; channel++) {
                totalValue += Math.abs(data[offset + channel]);
            }

            // If this is the smallest average value thus far, set the split
            // length such that the first packet ends with the current sample
            if (totalValue <= minValue) {
                optimalSplitLength = offset + format.channels;
                minValue = totalValue;
            }

        }

        // If packet is not split, return the supplied packet untouched
        if (optimalSplitLength === data.length)
            return [data];

        // Otherwise, split the packet into two new packets according to the
        // calculated optimal split length
        return [
            new SampleArray(data.buffer.slice(0, optimalSplitLength * format.bytesPerSample)),
            new SampleArray(data.buffer.slice(optimalSplitLength * format.bytesPerSample))
        ];

    };

    /**
     * Pushes the given packet of audio data onto the playback queue. Unlike
     * other private functions within Guacamole.RawAudioPlayer, the type of the
     * ArrayBuffer packet of audio data here need not be specific to the type
     * of audio (as with SampleArray). The ArrayBuffer type provided by a
     * Guacamole.ArrayBufferReader, for example, is sufficient. Any necessary
     * conversions will be performed automatically internally.
     *
     * @private
     * @param {ArrayBuffer} data
     *     A raw packet of audio data that should be pushed onto the audio
     *     playback queue.
     */
    var pushAudioPacket = function pushAudioPacket(data) {
        packetQueue.push(new SampleArray(data));
    };

    /**
     * Shifts off and returns a packet of audio data from the beginning of the
     * playback queue. The length of this audio packet is determined
     * dynamically according to the click-reduction algorithm implemented by
     * splitAudioPacket().
     *
     * @private
     * @returns {SampleArray}
     *     A packet of audio data pulled from the beginning of the playback
     *     queue, or null if the queue is empty.
     */
    var shiftAudioPacket = function shiftAudioPacket() {

        // Flatten data in packet queue
        var data = joinAudioPackets(packetQueue);
        if (!data)
            return null;

        // Pull an appropriate amount of data from the front of the queue
        packetQueue = splitAudioPacket(data);
        data = packetQueue.shift();

        return data;

    };

    /**
     * Converts the given audio packet into an AudioBuffer, ready for playback
     * by the Web Audio API. Unlike the raw audio packets received by this
     * audio player, AudioBuffers require floating point samples and are split
     * into isolated planes of channel-specific data.
     *
     * This function performs pure conversion only; scheduling state
     * (nextPacketTime) is maintained solely by the caller.
     *
     * @private
     * @param {SampleArray} data
     *     The raw audio packet that should be converted into a Web Audio API
     *     AudioBuffer.
     *
     * @returns {AudioBuffer}
     *     A new Web Audio API AudioBuffer containing the provided audio data,
     *     converted to the format used by the Web Audio API.
     */
    var toAudioBuffer = function toAudioBuffer(data) {

        // Calculate total number of samples
        var samples = data.length / format.channels;

        // Get audio buffer for specified format
        var audioBuffer = context.createBuffer(format.channels, samples, format.rate);

        // Convert each channel, de-interleaving samples and normalizing
        // each to the [-1, 1) floating point range expected by the API
        for (var channel = 0; channel < format.channels; channel++) {

            var audioData = audioBuffer.getChannelData(channel);

            // Fill audio buffer with data for channel
            var offset = channel;
            for (var i = 0; i < samples; i++) {
                audioData[i] = data[offset] / maxSampleValue;
                offset += format.channels;
            }

        }

        return audioBuffer;

    };

    // Defer playback of received audio packets slightly
    reader.ondata = function playReceivedAudio(data) {

        // Push received samples onto queue (pushAudioPacket() accepts the
        // raw ArrayBuffer directly and performs the SampleArray conversion
        // itself - no pre-wrapping needed)
        pushAudioPacket(data);

        // Shift off an arbitrary packet of audio data from the queue (this may
        // be different in size from the packet just pushed)
        var packet = shiftAudioPacket();
        if (!packet)
            return;

        // Determine exactly when packet CAN play
        var packetTime = context.currentTime;
        if (nextPacketTime < packetTime)
            nextPacketTime = packetTime;

        // Set up buffer source
        var source = context.createBufferSource();
        source.connect(context.destination);

        // Use noteOn() instead of start() if necessary
        if (!source.start)
            source.start = source.noteOn;

        // Schedule packet
        source.buffer = toAudioBuffer(packet);
        source.start(nextPacketTime);

        // Update timeline by duration of scheduled packet
        nextPacketTime += packet.length / format.channels / format.rate;

    };

    /** @override */
    this.sync = function sync() {

        // Calculate elapsed time since last sync
        var now = context.currentTime;

        // Reschedule future playback time such that playback latency is
        // bounded within a reasonable latency threshold
        nextPacketTime = Math.min(nextPacketTime, now + maxLatency);

    };

};

Guacamole.RawAudioPlayer.prototype = new Guacamole.AudioPlayer();
|
||||||
|
|
||||||
|
/**
 * A description of the format of raw PCM audio received by a
 * Guacamole.RawAudioPlayer. This object describes the number of bytes per
 * sample, the number of channels, and the overall sample rate.
 *
 * @private
 * @constructor
 * @param {Guacamole.RawAudioPlayer._Format|Object} template
 *     The object whose properties should be copied into the corresponding
 *     properties of the new Guacamole.RawAudioPlayer._Format.
 */
Guacamole.RawAudioPlayer._Format = function _Format(template) {

    /**
     * The number of bytes in each sample of audio data. This value is
     * independent of the number of channels.
     *
     * @type Number
     */
    this.bytesPerSample = template.bytesPerSample;

    /**
     * The number of audio channels (ie: 1 for mono, 2 for stereo).
     *
     * @type Number
     */
    this.channels = template.channels;

    /**
     * The number of samples per second, per channel.
     *
     * @type Number
     */
    this.rate = template.rate;

};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parses the given mimetype, returning a new Guacamole.RawAudioPlayer._Format
|
||||||
|
* which describes the type of raw audio data represented by that mimetype. If
|
||||||
|
* the mimetype is not supported by Guacamole.RawAudioPlayer, null is returned.
|
||||||
|
*
|
||||||
|
* @private
|
||||||
|
* @param {String} mimetype
|
||||||
|
* The audio mimetype to parse.
|
||||||
|
*
|
||||||
|
* @returns {Guacamole.RawAudioPlayer._Format}
|
||||||
|
* A new Guacamole.RawAudioPlayer._Format which describes the type of raw
|
||||||
|
* audio data represented by the given mimetype, or null if the given
|
||||||
|
* mimetype is not supported.
|
||||||
|
*/
|
||||||
|
Guacamole.RawAudioPlayer._Format.parse = function parseFormat(mimetype) {
|
||||||
|
|
||||||
|
var bytesPerSample;
|
||||||
|
|
||||||
|
// Rate is absolutely required - if null is still present later, the
|
||||||
|
// mimetype must not be supported
|
||||||
|
var rate = null;
|
||||||
|
|
||||||
|
// Default for both "audio/L8" and "audio/L16" is one channel
|
||||||
|
var channels = 1;
|
||||||
|
|
||||||
|
// "audio/L8" has one byte per sample
|
||||||
|
if (mimetype.substring(0, 9) === 'audio/L8;') {
|
||||||
|
mimetype = mimetype.substring(9);
|
||||||
|
bytesPerSample = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// "audio/L16" has two bytes per sample
|
||||||
|
else if (mimetype.substring(0, 10) === 'audio/L16;') {
|
||||||
|
mimetype = mimetype.substring(10);
|
||||||
|
bytesPerSample = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// All other types are unsupported
|
||||||
|
else
|
||||||
|
return null;
|
||||||
|
|
||||||
|
// Parse all parameters
|
||||||
|
var parameters = mimetype.split(',');
|
||||||
|
for (var i = 0; i < parameters.length; i++) {
|
||||||
|
|
||||||
|
var parameter = parameters[i];
|
||||||
|
|
||||||
|
// All parameters must have an equals sign separating name from value
|
||||||
|
var equals = parameter.indexOf('=');
|
||||||
|
if (equals === -1)
|
||||||
|
return null;
|
||||||
|
|
||||||
|
// Parse name and value from parameter string
|
||||||
|
var name = parameter.substring(0, equals);
|
||||||
|
var value = parameter.substring(equals+1);
|
||||||
|
|
||||||
|
// Handle each supported parameter
|
||||||
|
switch (name) {
|
||||||
|
|
||||||
|
// Number of audio channels
|
||||||
|
case 'channels':
|
||||||
|
channels = parseInt(value);
|
||||||
|
break;
|
||||||
|
|
||||||
|
// Sample rate
|
||||||
|
case 'rate':
|
||||||
|
rate = parseInt(value);
|
||||||
|
break;
|
||||||
|
|
||||||
|
// All other parameters are unsupported
|
||||||
|
default:
|
||||||
|
return null;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
// The rate parameter is required
|
||||||
|
if (rate === null)
|
||||||
|
return null;
|
||||||
|
|
||||||
|
// Return parsed format details
|
||||||
|
return new Guacamole.RawAudioPlayer._Format({
|
||||||
|
bytesPerSample : bytesPerSample,
|
||||||
|
channels : channels,
|
||||||
|
rate : rate
|
||||||
|
});
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Determines whether the given mimetype is supported by
|
||||||
|
* Guacamole.RawAudioPlayer.
|
||||||
|
*
|
||||||
|
* @param {String} mimetype
|
||||||
|
* The mimetype to check.
|
||||||
|
*
|
||||||
|
* @returns {Boolean}
|
||||||
|
* true if the given mimetype is supported by Guacamole.RawAudioPlayer,
|
||||||
|
* false otherwise.
|
||||||
|
*/
|
||||||
|
Guacamole.RawAudioPlayer.isSupportedType = function isSupportedType(mimetype) {
|
||||||
|
|
||||||
|
// No supported types if no Web Audio API
|
||||||
|
if (!window.AudioContext && !window.webkitAudioContext)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return Guacamole.RawAudioPlayer._Format.parse(mimetype) !== null;
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a list of all mimetypes supported by Guacamole.RawAudioPlayer. Only
|
||||||
|
* the core mimetypes themselves will be listed. Any mimetype parameters, even
|
||||||
|
* required ones, will not be included in the list. For example, "audio/L8" is
|
||||||
|
* a raw audio mimetype that may be supported, but it is invalid without
|
||||||
|
* additional parameters. Something like "audio/L8;rate=44100" would be valid,
|
||||||
|
* however (see https://tools.ietf.org/html/rfc4856).
|
||||||
|
*
|
||||||
|
* @returns {String[]}
|
||||||
|
* A list of all mimetypes supported by Guacamole.RawAudioPlayer, excluding
|
||||||
|
* any parameters. If the necessary JavaScript APIs for playing raw audio
|
||||||
|
* are absent, this list will be empty.
|
||||||
|
*/
|
||||||
|
Guacamole.RawAudioPlayer.getSupportedTypes = function getSupportedTypes() {
|
||||||
|
|
||||||
|
// No supported types if no Web Audio API
|
||||||
|
if (!window.AudioContext && !window.webkitAudioContext)
|
||||||
|
return [];
|
||||||
|
|
||||||
|
// We support 8-bit and 16-bit raw PCM
|
||||||
|
return [
|
||||||
|
'audio/L8',
|
||||||
|
'audio/L16'
|
||||||
|
];
|
||||||
|
|
||||||
|
};
|
@@ -78,12 +78,12 @@ Guacamole.Client = function(tunnel) {
|
|||||||
var layers = {};
|
var layers = {};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* All audio channels currentl in use by the client. Initially, this will
|
* All audio players currently in use by the client. Initially, this will
|
||||||
* be empty, but channels may be allocated by the server upon request.
|
* be empty, but audio players may be allocated by the server upon request.
|
||||||
*
|
*
|
||||||
* @type Object.<Number, Guacamole.AudioChannel>
|
* @type Object.<Number, Guacamole.AudioPlayer>
|
||||||
*/
|
*/
|
||||||
var audioChannels = {};
|
var audioPlayers = {};
|
||||||
|
|
||||||
// No initial parsers
|
// No initial parsers
|
||||||
var parsers = [];
|
var parsers = [];
|
||||||
@@ -440,6 +440,25 @@ Guacamole.Client = function(tunnel) {
|
|||||||
*/
|
*/
|
||||||
this.onerror = null;
|
this.onerror = null;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fired when a audio stream is created. The stream provided to this event
|
||||||
|
* handler will contain its own event handlers for received data.
|
||||||
|
*
|
||||||
|
* @event
|
||||||
|
* @param {Guacamole.InputStream} stream
|
||||||
|
* The stream that will receive audio data from the server.
|
||||||
|
*
|
||||||
|
* @param {String} mimetype
|
||||||
|
* The mimetype of the audio data which will be received.
|
||||||
|
*
|
||||||
|
* @return {Guacamole.AudioPlayer}
|
||||||
|
* An object which implements the Guacamole.AudioPlayer interface and
|
||||||
|
* has been initialied to play the data in the provided stream, or null
|
||||||
|
* if the built-in audio players of the Guacamole client should be
|
||||||
|
* used.
|
||||||
|
*/
|
||||||
|
this.onaudio = null;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Fired when the clipboard of the remote client is changing.
|
* Fired when the clipboard of the remote client is changing.
|
||||||
*
|
*
|
||||||
@@ -499,27 +518,6 @@ Guacamole.Client = function(tunnel) {
|
|||||||
*/
|
*/
|
||||||
this.onsync = null;
|
this.onsync = null;
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the audio channel having the given index, creating a new channel
|
|
||||||
* if necessary.
|
|
||||||
*
|
|
||||||
* @param {Number} index
|
|
||||||
* The index of the audio channel to retrieve.
|
|
||||||
*
|
|
||||||
* @returns {Guacamole.AudioChannel}
|
|
||||||
* The audio channel having the given index.
|
|
||||||
*/
|
|
||||||
var getAudioChannel = function getAudioChannel(index) {
|
|
||||||
|
|
||||||
// Get audio channel, creating it first if necessary
|
|
||||||
var audio_channel = audioChannels[index];
|
|
||||||
if (!audio_channel)
|
|
||||||
audio_channel = audioChannels[index] = new Guacamole.AudioChannel();
|
|
||||||
|
|
||||||
return audio_channel;
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns the layer with the given index, creating it if necessary.
|
* Returns the layer with the given index, creating it if necessary.
|
||||||
* Positive indices refer to visible layers, an index of zero refers to
|
* Positive indices refer to visible layers, an index of zero refers to
|
||||||
@@ -626,24 +624,30 @@ Guacamole.Client = function(tunnel) {
|
|||||||
"audio": function(parameters) {
|
"audio": function(parameters) {
|
||||||
|
|
||||||
var stream_index = parseInt(parameters[0]);
|
var stream_index = parseInt(parameters[0]);
|
||||||
var channel = getAudioChannel(parseInt(parameters[1]));
|
var mimetype = parameters[1];
|
||||||
var mimetype = parameters[2];
|
|
||||||
var duration = parseFloat(parameters[3]);
|
|
||||||
|
|
||||||
// Create stream
|
// Create stream
|
||||||
var stream = streams[stream_index] =
|
var stream = streams[stream_index] =
|
||||||
new Guacamole.InputStream(guac_client, stream_index);
|
new Guacamole.InputStream(guac_client, stream_index);
|
||||||
|
|
||||||
// Assemble entire stream as a blob
|
// Get player instance via callback
|
||||||
var blob_reader = new Guacamole.BlobReader(stream, mimetype);
|
var audioPlayer = null;
|
||||||
|
if (guac_client.onaudio)
|
||||||
|
audioPlayer = guac_client.onaudio(stream, mimetype);
|
||||||
|
|
||||||
// Play blob as audio
|
// If unsuccessful, try to use a default implementation
|
||||||
blob_reader.onend = function() {
|
if (!audioPlayer)
|
||||||
channel.play(mimetype, duration, blob_reader.getBlob());
|
audioPlayer = Guacamole.AudioPlayer.getInstance(stream, mimetype);
|
||||||
};
|
|
||||||
|
|
||||||
// Send success response
|
// If we have successfully retrieved an audio player, send success response
|
||||||
guac_client.sendAck(stream_index, "OK", 0x0000);
|
if (audioPlayer) {
|
||||||
|
audioPlayers[stream_index] = audioPlayer;
|
||||||
|
guac_client.sendAck(stream_index, "OK", 0x0000);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, mimetype must be unsupported
|
||||||
|
else
|
||||||
|
guac_client.sendAck(stream_index, "BAD TYPE", 0x030F);
|
||||||
|
|
||||||
},
|
},
|
||||||
|
|
||||||
@@ -1113,11 +1117,11 @@ Guacamole.Client = function(tunnel) {
|
|||||||
// Flush display, send sync when done
|
// Flush display, send sync when done
|
||||||
display.flush(function displaySyncComplete() {
|
display.flush(function displaySyncComplete() {
|
||||||
|
|
||||||
// Synchronize all audio channels
|
// Synchronize all audio players
|
||||||
for (var index in audioChannels) {
|
for (var index in audioPlayers) {
|
||||||
var audioChannel = audioChannels[index];
|
var audioPlayer = audioPlayers[index];
|
||||||
if (audioChannel)
|
if (audioPlayer)
|
||||||
audioChannel.sync();
|
audioPlayer.sync();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send sync response to server
|
// Send sync response to server
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (C) 2014 Glyptodon LLC
|
* Copyright (C) 2015 Glyptodon LLC
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
* of this software and associated documentation files (the "Software"), to deal
|
||||||
@@ -31,82 +31,11 @@ angular.module('client').factory('guacAudio', [function guacAudio() {
|
|||||||
return new (function() {
|
return new (function() {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Array of codecs to test.
|
* Array of all supported audio mimetypes.
|
||||||
*
|
*
|
||||||
* @type String[]
|
* @type String[]
|
||||||
*/
|
*/
|
||||||
var codecs = [
|
this.supported = Guacamole.AudioPlayer.getSupportedTypes();
|
||||||
'audio/ogg; codecs="vorbis"',
|
|
||||||
'audio/mp4; codecs="mp4a.40.5"',
|
|
||||||
'audio/mpeg; codecs="mp3"',
|
|
||||||
'audio/webm; codecs="vorbis"',
|
|
||||||
'audio/wav; codecs=1'
|
|
||||||
];
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Array of all codecs that are reported as "probably" supported.
|
|
||||||
*
|
|
||||||
* @type String[]
|
|
||||||
*/
|
|
||||||
var probably_supported = [];
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Array of all codecs that are reported as "maybe" supported.
|
|
||||||
*
|
|
||||||
* @type String[]
|
|
||||||
*/
|
|
||||||
var maybe_supported = [];
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Internal audio element for the sake of testing codec support. If
|
|
||||||
* audio is explicitly not supported by the browser, this will instead
|
|
||||||
* be null.
|
|
||||||
*
|
|
||||||
* @type Audio
|
|
||||||
*/
|
|
||||||
var audio = null;
|
|
||||||
|
|
||||||
// Attempt to create audio element
|
|
||||||
try {
|
|
||||||
audio = new Audio();
|
|
||||||
}
|
|
||||||
catch (e) {
|
|
||||||
// If creation fails, allow audio to remain null
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Array of all supported audio mimetypes, ordered by liklihood of
|
|
||||||
* working.
|
|
||||||
*/
|
|
||||||
this.supported = [];
|
|
||||||
|
|
||||||
// Build array of supported audio formats (if audio supported at all)
|
|
||||||
if (audio) {
|
|
||||||
codecs.forEach(function(mimetype) {
|
|
||||||
|
|
||||||
var support_level = audio.canPlayType(mimetype);
|
|
||||||
|
|
||||||
// Trim semicolon and trailer
|
|
||||||
var semicolon = mimetype.indexOf(";");
|
|
||||||
if (semicolon !== -1)
|
|
||||||
mimetype = mimetype.substring(0, semicolon);
|
|
||||||
|
|
||||||
// Partition by probably/maybe
|
|
||||||
if (support_level === "probably")
|
|
||||||
probably_supported.push(mimetype);
|
|
||||||
else if (support_level === "maybe")
|
|
||||||
maybe_supported.push(mimetype);
|
|
||||||
|
|
||||||
});
|
|
||||||
|
|
||||||
// Add probably supported types first
|
|
||||||
Array.prototype.push.apply(
|
|
||||||
this.supported, probably_supported);
|
|
||||||
|
|
||||||
// Prioritize "maybe" supported types second
|
|
||||||
Array.prototype.push.apply(
|
|
||||||
this.supported, maybe_supported);
|
|
||||||
}
|
|
||||||
|
|
||||||
})();
|
})();
|
||||||
|
|
||||||
|
Reference in New Issue
Block a user