% decoder outline Script
% this script will outline how to get the data into matlab from both the
% audio camera and the EEG system.  The setup should be as follows.  The
% audio camera should be connected to this computer using the express port
% and the usb.  The eeg should be connected to the second acquisition
% machine and connected through the Gb switch to this computer.  There are
% several ipconfig.txt files that outline how the network is connected.
% EEGipconfig.txt: this file contains the ip of this machine and the port
% that will be used to grab data from the EEG.  
% there is an ipconfig.txt file in C:\Program Files\VisiSonics\Bin\rt-002
% that contains the local IP for use with the audio camera.  In this
% architecture it should be 127.0.0.1, and the port is somewhat arbitrary;
% we are using 5008, but it can be any unused port.

% --- Configuration and device connections --------------------------------
% Envelope downsampling factor: audio envelopes are resampled by 10/441
% below, taking 44.1 kHz camera audio down to 1000 samples/s (44100*10/441).
envResamplingFactor = 441;
Fs = 44100; % the samplerate of the camera, can't be changed
% g_2.mat supplies the decoder parameters consumed in the main loop:
% g, rlags, threshold, ind (passed to Decoder).
% NOTE(review): file contents not visible from this script — confirm.
load('g_2.mat');

% Start acquisition of the audio camera (local loopback; port must match
% the ipconfig.txt in C:\Program Files\VisiSonics\Bin\rt-002).
camera = connect_to_audio_camera('127.0.0.1',5008);
pause(5); % give the camera stream time to start before it is used

% Start EEG acquisition (address/port per EEGipconfig.txt).
EEG = connect_to_EEG('169.254.158.90',50005);

% Pseudo-synchronize the two streams: clearing latestSecond on both
% objects ensures the loop below only proceeds once each source has
% delivered a fresh second of data from this point onward.
camera.userdata.latestSecond = [];
EEG.userdata.latestSecond = [];

disp('waiting for camera ... ');
% NOTE(review): mode 3 semantics are defined in switch_headphone_mode —
% presumably an initial/neutral headphone routing; confirm in that file.
switch_headphone_mode(camera,3);

disp('ready to go');

% At this point data is being logged into the camera and EEG objects.
% The loop below waits until one second of data has been acquired from
% each device and then copies it into the local buffers.
envBuffer1 = [];  % speaker-1 envelope history; grows 1000 rows per second
envBuffer2 = [];  % speaker-2 envelope history; grows 1000 rows per second
eegBuffer  = [];  % EEG history; one row per sample x 32 channels
                  % NOTE(review): EEG sample rate not visible here — the
                  % 45000-row decode window assumes it matches the 1 kHz
                  % envelopes; confirm.
k = -99;           % sentinel decoder output, printed until the first real decode
decodeLen = 45000; % rows (45 s at 1000 samples/s) required before decoding
for i = 1:1000
    % Proceed only when both devices have delivered a fresh second of data.
    % (~isempty is the idiomatic emptiness test; length(x) > 0 was fragile.)
    if(~isempty(camera.userdata.latestSecond) && ~isempty(EEG.userdata.latestSecond))
        CleanSpeech = camera.userdata.latestSecond;
        CleanSpeech = CleanSpeech./32767; % int16 counts -> floats in [-1, 1]
        % CleanSpeech: 2x44100 samples
        EEGdata = EEG.userdata.latestSecond;

        % Do decoder and alignment here using the last frame of data. This
        % should finish in under a second so we can grab the next frame.

        % Amplitude envelope of each speech channel via the analytic
        % signal, downsampled 44.1 kHz -> 1 kHz, stored as a column.
        envelope_1 = resample(abs(hilbert(CleanSpeech(1,:))),10,envResamplingFactor)';
        envelope_2 = resample(abs(hilbert(CleanSpeech(2,:))),10,envResamplingFactor)';

        % Sliding window: once full, drop the oldest second (1000 rows)
        % from every buffer so they stay mutually aligned.
        % size(...,1) counts rows explicitly; length() on an Nx32 matrix
        % returns max(N,32) and is fragile.
        if size(eegBuffer,1) >= decodeLen
            envBuffer1 = envBuffer1(1001:end,:);
            envBuffer2 = envBuffer2(1001:end,:);
            eegBuffer = eegBuffer(1001:end,:);
        end
        disp('appending to buffers')
        envBuffer1 = [envBuffer1; envelope_1]; %#ok<AGROW> growth bounded by decodeLen
        envBuffer2 = [envBuffer2; envelope_2]; %#ok<AGROW>
        eegBuffer = [eegBuffer; EEGdata(1:32,:)']; %#ok<AGROW> first 32 channels, samples as rows

        disp(size(envBuffer1,1))
        if size(envBuffer1,1) >= decodeLen
            % g, rlags, threshold, ind come from g_2.mat (loaded above).
            k = Decoder(eegBuffer(1:decodeLen,:), envBuffer1(1:decodeLen,:), envBuffer2(1:decodeLen,:),g, rlags, threshold, ind);
            switch_box_mode(camera, k);
            disp('switching box mode')
        end

        disp(k)

        % Re-arm the datagram callbacks.
        % NOTE(review): re-assigning every iteration looks redundant —
        % confirm whether the acquisition code clears these handlers.
        EEG.DatagramReceivedFcn = @myEEG_callback;
        camera.DatagramReceivedFcn = @myAC_callback;

    end
    pause(1); % throttle: one pass per second of incoming data
end
disp('end of demo');