The Nx API allows third-party applications to send audio to a camera and receive audio from a camera, enabling 2-way audio communication through security cameras.
How does 2-way audio work?
Your camera must support 2-way audio and be supported by Network Optix. To verify, check the IPVD for devices with the “2-way audio devices” tag enabled.
Basic format for sending audio to a camera:
wss://<server_ip>:<server_port>/api/http_audio?camera_Id=<id>&format=<audio_sample_format>&sample_rate=<audio_sample_rate>&channels=<audio_channels_count>
Replace the <server_ip>, <server_port>, and <id> with the relevant information. <id> should reflect the Camera ID of the camera you wish to use.
When using a raw audio format (PCM), include the following parameters:
<audio_sample_format>: use one of the following formats: u8, s16be, s16le, s32be, s32le, f32be, f32le, f64be, or f64le.
<audio_sample_rate>: the desired audio sample rate, as an integer value.
<audio_channels_count>: use 1 for mono or 2 for stereo.
Note: For audio muxed into a media container (e.g., MP4, WAV, or AAC), omit these parameters from your request.
Example of a request:
wss://localhost:7001/api/http_audio?camera_Id=2a4717bb-1d3e-4878-a28b-af4eaedbfb89&format=f32le&sample_rate=44100&channels=1
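For instance, here is a minimal JavaScript sketch that opens the WebSocket and pushes one second of f32le silence to verify the connection. The server address, credentials, and Camera ID are placeholders, and the camera_id parameter name and credentials-in-URL style follow the sample page further down.
// Minimal sketch: open the 2-way audio WebSocket and send one buffer of f32le silence.
// The server address, credentials, and Camera ID are placeholders - replace them with your own.
var url = "ws://admin:password@localhost:7001/api/http_audio"
    + "?camera_id=2a4717bb-1d3e-4878-a28b-af4eaedbfb89"
    + "&format=f32le&sample_rate=44100&channels=1";
var ws = new WebSocket(url);
ws.binaryType = "arraybuffer";
ws.onopen = function() {
    var silence = new Float32Array(44100); // one second of mono silence at 44100 Hz
    ws.send(silence);                      // raw PCM samples go straight into the socket
};
ws.onerror = function(e) {
    console.log("Websocket error:", e);
};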
Receiving audio from a camera
Basic format for requesting audio from a camera:
https://<server_ip>:<server_port>/media/<camera_id>.webm?audio_only
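For example, a minimal JavaScript sketch that plays the audio-only stream in the browser (the server address and Camera ID are placeholders):
// Minimal sketch: play an audio-only WebM stream from a camera.
// The server address and Camera ID are placeholders - replace them with your own.
var cameraAudio = new Audio(
    "https://localhost:7001/media/2a4717bb-1d3e-4878-a28b-af4eaedbfb89.webm?audio_only");
cameraAudio.play(); // may require a prior user gesture and an authenticated session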
Code example
The code below is a sample HTML page that demonstrates 2-way audio in the browser through the API.
To test the sample, enter your information at the top (including the Camera ID), click Connect in the ‘Mic’ section, and then click Mic to start streaming from your microphone. If your browser asks for microphone permission, allow it.
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Upload Files</title>
</head>
<body>
Server: <input type="text" id="server" size="40" value="localhost:7001"><br>
User: <input type="text" id="user" size="40" value="admin"><br>
Password: <input type="text" id="pass" size="40" value="qweasd234"><br>
Camera ID: <input type="text" id="camera_id" size="40" value="2a4717bb-1d3e-4878-a28b-af4eaedbfb89"><br>
<h2>Mic</h2>
<input type="button" value="Connect" onclick="connectMic()" />
<br>
<input type="button" value="Mic" onclick="webaudio_tooling_obj()" />
<br>
<br>
<h2>File Upload</h2>
<input type="file" id="filename" />
<br>
<input type="button" value="Connect" onclick="connectFile()" />
<br>
<input type="button" value="Upload" onclick="sendFile()" />
<br>
<input type="button" value="close" onclick="close()" />
<br>
<h2>Audio from camera (update the camera ID in the URL manually)</h2>
<audio controls="controls">
<source src="http://localhost:7001/media/2a4717bb-1d3e-4878-a28b-af4eaedbfb89.webm?audio_only" type="audio/webm">
</audio>
<script>
var ws;
function closeWs() // renamed from close() to avoid shadowing the built-in window.close()
{
ws.close();
}
function urlPrefix() {
return url = "ws://"
+ document.getElementById('user').value + ":"
+ document.getElementById('pass').value + "@"
+ document.getElementById('server').value + "/api/http_audio?camera_id=";
}
function connectFile() {
var url = urlPrefix() + document.getElementById('camera_id').value;
connect(url);
}
function connectMic() {
var url = urlPrefix() + document.getElementById('camera_id').value + "&format=f32le&sample_rate=44100&channels=1";
connect(url);
}
function connect(url) {
console.log("connect to", url);
ws = new WebSocket(url);
ws.binaryType = "arraybuffer";
ws.onopen = function() {
console.log("connected");
};
ws.onclose = function() {
console.log("Connection is closed...");
};
ws.onerror = function(e) {
console.log("Websocket error: " + e.msg);
}
}
function sendFile() {
var file = document.getElementById('filename').files[0];
var reader = new FileReader();
reader.onload = function(e) {
ws.send(e.target.result); // send the whole file as one binary message
//ws.close();
alert("The file has been transferred.")
}
reader.readAsArrayBuffer(file);
}
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_fft_node = null,
analyserNode = null;
// legacy getUserMedia API with vendor-prefixed fallbacks for older browsers
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
function process_microphone_buffer(event) { // called by the ScriptProcessorNode for each captured audio buffer
var i, N, inp, microphone_output_buffer;
if (ws.readyState != WebSocket.OPEN) {
console.log("connection closed");
gain_node.disconnect(audioContext.destination);
microphone_stream.disconnect(gain_node);
return;
}
microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
console.log("sample rate(update in url if differ)", event.inputBuffer.sampleRate);
ws.send(microphone_output_buffer);
}
function start_microphone(stream){
console.log("microphone is starting up ...");
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination ); // note: this also plays the mic locally and may cause feedback
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
script_processor_node.connect(gain_node);
microphone_stream.connect(script_processor_node);
}
}; // webaudio_tooling_obj = function()
</script>
</body>
</html>