diff --git a/analyze_source.m b/analyze_source.m
new file mode 100644
index 0000000000000000000000000000000000000000..a2ba915bf55bdde68e6acfa357659dbcc9121dd7
--- /dev/null
+++ b/analyze_source.m
@@ -0,0 +1,84 @@
+function [source_num,source_name] = analyze_source(audio,fs)
+%ANALYZE_SOURCE Estimates whether we are listening to source 1, 2, or 3:
+%
+%   1) Male voice
+%   2) Female voice
+%   3) Trumpet
+%
+% Returns the index of the best-matching source (source_num) and its
+% label (source_name).
+
+% Labels
+source_labels = {'male','female','trumpet'};
+
+% Hard-coded training data for sources 1, 2, and 3
+% These are the mean pitch values, calculated over each entire training
+% file, for the three sources. They do not necessarily reflect the true
+% fundamental frequency of each source, and they do not generalise to
+% files outside of the three-file training set.
+mean_pitch = [301 235 316];
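+%
+% Illustration of the decision rule (250 Hz is a hypothetical value, not
+% taken from the training files): a measured mean pitch of 250 Hz is
+% closest to 235 Hz, so the nearest-mean rule at the end of this function
+% would label the chunk as source 2 (female).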
+
+
+% USE THE AUDITORY FRONT-END (AFE) TO CALCULATE THE MEAN PITCH
+
+%%% SET PARAMETERS
+
+% Request pitch processor
+requests = {'pitch'};
+
+% Parameters of auditory filterbank
+fb_type       = 'gammatone';
+fb_lowFreqHz  = 80;
+fb_highFreqHz = 8000;
+fb_nChannels  = 16;
+
+% Parameters of the inner hair cell (IHC) processor
+ihc_method    = 'dau';
+
+% Parameters of autocorrelation processor
+ac_wSizeSec   = 0.02;
+ac_hSizeSec   = 0.01;
+ac_clipAlpha  = 0.0;
+ac_K          = 2;
+ac_wname      = 'hann';
+
+% Parameters of pitch processor
+pi_rangeHz     = [80 400];              % default: [80 400]
+pi_confThres   = 0.7;
+pi_medianOrder = 3;
+
+% Bundle the above into a single parameter structure
+par = genParStruct('fb_type',fb_type,'fb_lowFreqHz',fb_lowFreqHz,...
+    'fb_highFreqHz',fb_highFreqHz,'fb_nChannels',fb_nChannels,...
+    'ihc_method',ihc_method,'ac_wSizeSec',ac_wSizeSec,...
+    'ac_hSizeSec',ac_hSizeSec,'ac_clipAlpha',ac_clipAlpha,...
+    'ac_K',ac_K,'ac_wname',ac_wname,'pi_rangeHz',pi_rangeHz,...
+    'pi_confThres',pi_confThres,'pi_medianOrder',pi_medianOrder);
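+% (Parameters not specified here should fall back to the AFE defaults.)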
+
+% Create a data object
+dObj = dataObject(audio,fs);
+
+% Create a manager
+mObj = manager(dObj,requests,par);
+
+% Request processing
+mObj.processSignal();
+
+
+%%% GET THE DATA
+
+% Get the data from the object
+data = dObj.pitch{1}.Data(:);
+
+% Mean pitch over this chunk of audio, ignoring NaN (low-confidence) frames
+meanvalue = mean(data(~isnan(data(:,1)),1));
+%stdvalue = std(data(~isnan(data(:,1)),1));
+
+%%% GUESS THE SOURCE
+
+% Pick the source whose training mean pitch is closest to the measured mean
+[~,source_num] = min(abs(mean_pitch - meanvalue));
+
+% Get label
+source_name = source_labels{source_num};
+
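+% Example usage (a minimal sketch; 'test_recording.wav' is a hypothetical
+% file name, not part of this repository):
+%
+%   [audio,fs] = audioread('test_recording.wav');
+%   [source_num,source_name] = analyze_source(audio,fs);
+%   fprintf('Closest match: source %d (%s)\n',source_num,source_name);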
diff --git a/state_machine.m b/state_machine.m
index 60da0b3212a9a6e74f3f4dbaf0347eaf4728fa67..6a99e52e5e4fc8a56c31e2e6a964530d6a573848 100644
--- a/state_machine.m
+++ b/state_machine.m
@@ -1,9 +1,11 @@
 d2r = 180/pi;
 th_az = 5; % Degrees
 step_distance = 0.1; % meters
-% sendPosition.moveRelativePosition('map', moveFront, moveLeft, rotateClockwise);
 th_dist = 0.78;
 
+% sendPosition.moveRelativePosition('map', moveFront, moveLeft, rotateClockwise);
+
+
 while(true)
     
     pause(0.01)
@@ -26,14 +28,21 @@ while(true)
     end
     
     % We are there, analyze the source
-    
-    source_label = analyze_source(audio);
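+    % Grab a chunk of audio and classify the source by its mean pitch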
+    audio = get_audio(12000);
+    [source_num,source_name] = analyze_source(audio,fs);
     display('Source label determined')
-    disp(source_label)
+    disp(source_name)
     
     key = input('Press any key to continue, press c to exit: ','s');
     if key == 'c'
         break
     end
     
-end
\ No newline at end of file
+end
+
+% To get the current position:
+% position = sendPosition.NavigationState().NavigationState.position;
\ No newline at end of file