shithub: aubio

ref: c912c67ccd0dd420833c8e063fe084f3e59bb85a
parent: e968939e0135dcc1d09d611d875dc07b0605e862
author: Paul Brossier <piem@altern.org>
date: Fri Feb 17 12:17:10 EST 2006

merge some benchonset code into node


--- a/python/aubio/bench/node.py
+++ b/python/aubio/bench/node.py
@@ -143,20 +143,62 @@
 		print "Creating results directory"
 		act_on_results(mkdir,self.datadir,self.resdir,filter='d')
 
-	def pretty_print(self,values,sep='|'):
-		for i in range(len(values)):
-			print self.formats[i] % values[i], sep,
+	def pretty_print(self,sep='|'):
+		for i in self.printnames:
+			print self.formats[i] % self.v[i], sep,
 		print
 
+	def pretty_titles(self,sep='|'):
+		for i in self.printnames:
+			print self.formats[i] % i, sep,
+		print
+
 	def dir_exec(self):
 		""" run file_exec on every input file """
-		pass
+		self.l, self.labs = [], []
+		self.v = {}
+		for i in self.valuenames:
+			self.v[i] = [] 
+		for i in self.valuelists:
+			self.v[i] = [] 
+		act_on_files(self.file_exec,self.sndlist,self.reslist, \
+			suffix='',filter=sndfile_filter)
 
 	def dir_eval(self):
 		pass
 
-	def file_exec(self):
-		pass
+	def file_gettruth(self,input):
+		""" get ground truth filenames """
+		from os.path import isfile
+		ftrulist = []
+		# first look for a matching annotation file "<input stem>.txt"
+		ftru = '.'.join(input.split('.')[:-1])
+		ftru = '.'.join((ftru,'txt'))
+		if isfile(ftru):
+			ftrulist.append(ftru)
+		else:
+			# search for matches for filetask.input in the list of results
+			for i in range(len(self.reslist)):
+				check = '.'.join(self.reslist[i].split('.')[:-1])
+				check = '_'.join(check.split('_')[:-1])
+				if check == '.'.join(input.split('.')[:-1]):
+					ftrulist.append(self.reslist[i])
+		return ftrulist
+
+	def file_exec(self,input,output):
+		""" create filetask, extract data, evaluate """
+		filetask = self.task(input,params=self.params)
+		computed_data = filetask.compute_all()
+		ftrulist = self.file_gettruth(filetask.input)
+		for ftru in ftrulist:
+			filetask.eval(computed_data,ftru,mode='rocloc',vmode='')
+			# append this file's values to the directory-wide self.v
+			for i in self.valuenames:
+				self.v[i].append(filetask.v[i])
+			for j in self.valuelists:
+				if filetask.v[j]:
+					for x in filetask.v[j]:
+						self.v[j].append(x)
 	
 	def file_eval(self):
 		pass
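
For readers following the node.py changes: file_gettruth strips the sound file's extension, first looks for a sibling "<stem>.txt" annotation, and otherwise falls back to result files whose stem, minus a trailing "_suffix", matches the input's stem. A minimal standalone sketch of that matching logic (the function and variable names here are illustrative, not aubio API):

from os.path import isfile

def find_truth_files(sndfile, reslist):
	# strip the extension: "audio/take1.wav" -> "audio/take1"
	stem = '.'.join(sndfile.split('.')[:-1])
	if isfile(stem + '.txt'):
		# a sibling annotation file wins
		return [stem + '.txt']
	# otherwise keep result files whose stem, minus a
	# trailing "_suffix", matches the input stem
	matches = []
	for res in reslist:
		check = '.'.join(res.split('.')[:-1])
		check = '_'.join(check.split('_')[:-1])
		if check == stem:
			matches.append(res)
	return matches

# find_truth_files('take1.wav', ['take1_c.txt', 'take2_c.txt'])
# returns ['take1.txt'] if that file exists, else ['take1_c.txt']
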
--- a/python/test/bench/onset/bench-onset
+++ b/python/test/bench/onset/bench-onset
@@ -19,89 +19,38 @@
 
 class benchonset(bench):
 
+	""" list of values to store per file """
 	valuenames = ['orig','missed','Tm','expc','bad','Td']
+	""" list of lists to store per file """
 	valuelists = ['l','labs']
-	printnames = [ 'mode', 'thres', 'dist', 'prec', 'recl', 'Ttrue', 'Tfp',  'Tfn',  'Tm',   'Td',
-		'aTtrue', 'aTfp', 'aTfn', 'aTm',  'aTd',  'mean', 'smean',  'amean', 'samean']
+	""" list of values to print per dir """
+	printnames = [ 'mode', 'thres', 'dist', 'prec', 'recl', 
+		'Ttrue', 'Tfp',  'Tfn',  'Tm',   'Td',
+		'aTtrue', 'aTfp', 'aTfn', 'aTm',  'aTd',  
+		'mean', 'smean',  'amean', 'samean']
 
-	formats = {'mode': "%12s" , 
-	'thres': "%5.4s",
-	'dist':  "%5.4s",
-	'prec':  "%5.4s",
-	'recl':  "%5.4s",
-                 
-	'Ttrue': "%5.4s", 
-	'Tfp':   "%5.4s",
-	'Tfn':   "%5.4s",
-	'Tm':    "%5.4s",
-	'Td':    "%5.4s",
-                 
-	'aTtrue':"%5.4s", 
-	'aTfp':  "%5.4s",
-	'aTfn':  "%5.4s",
-	'aTm':   "%5.4s",
-	'aTd':   "%5.4s",
-                 
-	'mean':  "%5.40s", 
-	'smean': "%5.40s",
-	'amean':  "%5.40s", 
-	'samean': "%5.40s"}
-	
-	def file_gettruth(self,input):
-		from os.path import isfile
-		ftrulist = []
-		# search for match as filetask.input,".txt" 
-		ftru = '.'.join(input.split('.')[:-1])
-		ftru = '.'.join((ftru,'txt'))
-		if isfile(ftru):
-			ftrulist.append(ftru)
-		else:
-			# search for matches for filetask.input in the list of results
-			for i in range(len(self.reslist)):
-				check = '.'.join(self.reslist[i].split('.')[:-1])
-				check = '_'.join(check.split('_')[:-1])
-				if check == '.'.join(input.split('.')[:-1]):
-					ftrulist.append(self.reslist[i])
-		return ftrulist
+	""" per dir """
+	formats = {'mode': "%12s" , 'thres': "%5.4s", 
+		'dist':  "%5.4s", 'prec': "%5.4s", 'recl':  "%5.4s",
+		'Ttrue': "%5.4s", 'Tfp':   "%5.4s", 'Tfn':   "%5.4s", 
+		'Tm':    "%5.4s", 'Td':    "%5.4s",
+		'aTtrue':"%5.4s", 'aTfp':  "%5.4s", 'aTfn':  "%5.4s", 
+		'aTm':   "%5.4s", 'aTd':   "%5.4s",
+		'mean':  "%5.40s", 'smean': "%5.40s", 
+		'amean':  "%5.40s", 'samean': "%5.40s"}
 
-	def file_exec(self,input,output):
-		filetask = self.task(input,params=self.params)
-		computed_data = filetask.compute_all()
-		ftrulist = self.file_gettruth(filetask.input)
-		for i in ftrulist:
-			#print i
-			filetask.eval(computed_data,i,mode='rocloc',vmode='')
-			for i in self.valuenames:
-				self.v[i] += filetask.v[i]
-			for i in filetask.v['l']:
-				self.v['l'].append(i)
-			for i in filetask.v['labs']:
-				self.v['labs'].append(i)
-	
-	def dir_exec(self):
-		""" run file_exec on every input file """
-		self.l , self.labs = [], [] 
-		self.v = {}
-		for i in self.valuenames:
-			self.v[i] = 0. 
-		for i in self.valuelists:
-			self.v[i] = [] 
-		self.v['thres'] = self.params.threshold 
-		act_on_files(self.file_exec,self.sndlist,self.reslist, \
-			suffix='',filter=sndfile_filter)
-
 	def dir_eval(self):
-		totaltrue = self.v['expc']-self.v['bad']-self.v['Td']
-		totalfp = self.v['bad']+self.v['Td']
-                totalfn = self.v['missed']+self.v['Tm']
+		""" evaluate statistical data over the directory """
+		totaltrue = sum(self.v['expc'])-sum(self.v['bad'])-sum(self.v['Td'])
+		totalfp = sum(self.v['bad'])+sum(self.v['Td'])
+		totalfn = sum(self.v['missed'])+sum(self.v['Tm'])
 		self.P = 100*float(totaltrue)/max(totaltrue + totalfp,1)
 		self.R = 100*float(totaltrue)/max(totaltrue + totalfn,1)
 		if self.R < 0: self.R = 0
 		self.F = 2.* self.P*self.R / max(float(self.P+self.R),1)
-		
 		N = float(len(self.reslist))
-
 		self.v['mode']      = self.params.onsetmode
 		self.v['thres']     = "%2.3f" % self.params.threshold
 		self.v['dist']      = "%2.3f" % self.F
 		self.v['prec']      = "%2.3f" % self.P
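
Aside: the figures computed in dir_eval are ordinary precision, recall and F-measure, in percent, over counts pooled across the whole directory. A self-contained sketch of the same arithmetic (the function name is made up; reading Td as doubled and Tm as merged detections follows how the code folds them into the false positives and false negatives):

def onset_scores(expc, bad, missed, Tm, Td):
	# pooled counts: expected onsets, spurious detections, missed
	# onsets; Td counts with the false positives, Tm with the
	# false negatives, as in dir_eval
	true = expc - bad - Td
	fp = bad + Td
	fn = missed + Tm
	P = 100. * true / max(true + fp, 1)
	R = 100. * true / max(true + fn, 1)
	F = 2. * P * R / max(P + R, 1)
	return P, R, F

# onset_scores(100, 10, 8, 2, 3) -> (87.0, ~89.7, ~88.3)
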
@@ -112,8 +61,8 @@
 		self.v['aTtrue']    = totaltrue/N
 		self.v['aTfp']      = totalfp/N
 		self.v['aTfn']      = totalfn/N
-		self.v['aTm']       = self.v['Tm']/N
-		self.v['aTd']       = self.v['Td']/N
+		self.v['aTm']       = sum(self.v['Tm'])/N
+		self.v['aTd']       = sum(self.v['Td'])/N
 		self.v['mean']      = mmean(self.v['l'])
 		self.v['smean']     = stdev(self.v['l'])
 		self.v['amean']     = mmean(self.v['labs'])
@@ -122,7 +71,6 @@
 	def run_bench(self,modes=['dual'],thresholds=[0.5]):
 		self.modes = modes
 		self.thresholds = thresholds
-
 		self.pretty_titles()
 		for mode in self.modes:
 			self.params.onsetmode = mode
@@ -133,22 +81,12 @@
 				self.pretty_print()
 				#print self.v
 
-	def pretty_print(self,sep='|'):
-		for i in self.printnames:
-			print self.formats[i] % self.v[i], sep,
-		print
-
-	def pretty_titles(self,sep='|'):
-		for i in self.printnames:
-			print self.formats[i] % i, sep,
-		print
-
 	def auto_learn(self,modes=['dual'],thresholds=[0.1,1.5]):
 		""" simple dichotomia like algorithm to optimise threshold """
 		self.modes = modes
 		self.pretty_titles()
 		for mode in self.modes:
-			steps = 10 
+			steps = 11 
 			lesst = thresholds[0] 
 			topt = thresholds[1]
 			self.params.onsetmode = mode
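
Only the bracket setup of auto_learn (lesst, topt, now 11 steps) is visible in this hunk. As a rough, hypothetical illustration of the "dichotomia like" search the docstring describes — the score callable, the number of passes and the zooming rule below are assumptions, not aubio's actual implementation:

def dichotomic_search(score, lesst, topt, steps=11, passes=3):
	# scan [lesst, topt] on a regular grid, then narrow the
	# bracket around the best threshold and scan again
	best = lesst
	for _ in range(passes):
		step = (topt - lesst) / (steps - 1)
		grid = [lesst + i * step for i in range(steps)]
		best = max(grid, key=score)
		lesst, topt = max(best - step, 0.), best + step
	return best

# dichotomic_search(lambda t: -(t - 0.3) ** 2, 0.1, 1.5)
# converges towards the optimum at 0.3
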
@@ -230,7 +168,7 @@
 	if len(sys.argv) > 1: datapath = sys.argv[1]
 	else: print "ERR: a path is required"; sys.exit(1)
 	modes = ['complex', 'energy', 'phase', 'specdiff', 'kl', 'mkl', 'dual']
-	#modes = [ 'phase' ]
+	#modes = [ 'mkl' ]
 	thresholds = [ 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2]
 	#thresholds = [1.5]
 
@@ -241,7 +179,6 @@
 	benchonset.params = taskparams()
 	benchonset.task = taskonset
 	benchonset.valuesdict = {}
-
 
 	try:
 		#benchonset.auto_learn2(modes=modes)
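
Finally, a note on the "%5.4s" conversions in the formats table: a width of 5 with precision 4 pads each cell to at least five characters but truncates the text to four, keeping the columns aligned at the cost of clipping longer values (including some column titles). A quick illustration with made-up values:

formats = {'mode': "%12s", 'thres': "%5.4s", 'dist': "%5.4s"}
printnames = ['mode', 'thres', 'dist']
v = {'mode': 'complex', 'thres': '0.300', 'dist': '92.341'}

print(' | '.join(formats[i] % i for i in printnames))     # titles
print(' | '.join(formats[i] % v[i] for i in printnames))  # values
# prints:
#         mode |  thre |  dist
#      complex |  0.30 |  92.3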