diff --git a/pyAudioAnalysis/ShortTermFeatures.py b/pyAudioAnalysis/ShortTermFeatures.py index 3a3bd4250..f79efa195 100644 --- a/pyAudioAnalysis/ShortTermFeatures.py +++ b/pyAudioAnalysis/ShortTermFeatures.py @@ -332,7 +332,7 @@ def chromagram(signal, sampling_rate, window, step, plot=False, sampling_rate: the sampling freq (in Hz) window: the short-term window size (in samples) step: the short-term window step (in samples) - plot: flag, 1 if results are to be ploted + plot: flag, 1 if results are to be plotted RETURNS: """ window = int(window) @@ -397,7 +397,7 @@ def spectrogram(signal, sampling_rate, window, step, plot=False, sampling_rate: the sampling freq (in Hz) window: the short-term window size (in samples) step: the short-term window step (in samples) - plot: flag, 1 if results are to be ploted + plot: flag, 1 if results are to be plotted show_progress flag for showing progress using tqdm RETURNS: """ diff --git a/pyAudioAnalysis/audioSegmentation.py b/pyAudioAnalysis/audioSegmentation.py index aa718af42..956b5b7bc 100644 --- a/pyAudioAnalysis/audioSegmentation.py +++ b/pyAudioAnalysis/audioSegmentation.py @@ -682,10 +682,10 @@ def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5, - signal: the input audio signal - sampling_rate: sampling freq - st_win, st_step: window size and step in seconds - - smoothWindow: (optinal) smooth window (in seconds) - - weight: (optinal) weight factor (0 < weight < 1) + - smooth_window: (optional) smooth window (in seconds) + - weight: (optional) weight factor (0 < weight < 1) the higher, the more strict - - plot: (optinal) True if results are to be plotted + - plot: (optional) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that diff --git a/pyAudioAnalysis/audioTrainTest.py b/pyAudioAnalysis/audioTrainTest.py index 0913b656d..ece690a49 100644 --- a/pyAudioAnalysis/audioTrainTest.py +++ b/pyAudioAnalysis/audioTrainTest.py
@@ -711,7 +711,7 @@ class i is [n_samples x numOfDimensions] # this is just for debugging (it should be equal to f1) f1_b = sklearn.metrics.f1_score(y_test_all, y_pred_all, average='macro') - # Note: np.mean(f1_per_exp) will not be exacty equal to the + # Note: np.mean(f1_per_exp) will not be exactly equal to the # overall f1 (i.e. f1 and f1_b because these are calculated on a # per-sample basis) f1_std = np.std(f1_per_exp)