@@ -216,7 +216,7 @@ categories:
     model :
       fn : ' SUM::mctot(mcfrac[0.,1.]*sig, mcbkg)'
   - ptrange : [1., 5.]
-    range : [2.18 , 2.40]
+    range : [2.16 , 2.40]
     components :
 #    sig:
 #      fn: 'Gaussian::sig(m, mean[2.28,2.29], sigma_g1[.005,.01])'
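The `fn` strings use RooFit's workspace-factory syntax, so this hunk effectively widens the fit window from 2.18–2.40 to 2.16–2.40 around the ~2.28 signal peak. Below is a minimal, hypothetical sketch of how such a model could be instantiated; feeding the strings to `RooWorkspace.factory` and the Chebychev shape for `mcbkg` are assumptions for illustration, since the consuming fit code is not part of this diff.

```python
# Sketch only: assumes the 'fn' factory strings are evaluated by a RooWorkspace;
# the background shape for 'mcbkg' below is a placeholder, not taken from the config.
import ROOT

w = ROOT.RooWorkspace("w")

# Observable limited to the fit window set by 'range : [2.16, 2.40]'
w.factory("m[2.16, 2.40]")

# Signal component, as in the commented-out 'sig' block of the config
w.factory("Gaussian::sig(m, mean[2.28, 2.29], sigma_g1[.005, .01])")

# Hypothetical background: the real 'mcbkg' definition is outside this hunk
w.factory("Chebychev::mcbkg(m, {c0[-1., 1.]})")

# Total model, exactly the 'fn' string from the config
w.factory("SUM::mctot(mcfrac[0., 1.]*sig, mcbkg)")

# Example usage on a RooDataSet 'data' (not defined here):
# w.pdf("mctot").fitTo(data, ROOT.RooFit.Range(2.16, 2.40))
```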
@@ -544,9 +544,9 @@ categories:
     use_cuts : [True, True, True, True, True, True, True]
     cuts :
       - ["mlPromptScore > 0.97", "mlPromptScore > 0.9", "mlPromptScore > 0.9", "mlPromptScore > 0.85", "mlPromptScore > 0.85", "mlPromptScore > 0.8", "mlPromptScore > 0.8", "mlPromptScore > 0.6", "mlPromptScore > 0.6"] # default
-      - [null,null,null,null,null,null,null,null,null]
+      - [null,null,null,null,null,null,null,null,null,null]
       - ["mlPromptScore > 0.85", "mlPromptScore > 0.6", "mlPromptScore > 0.6", "mlPromptScore > 0.4", "mlPromptScore > 0.4", "mlPromptScore > 0.4", "mlPromptScore > 0.4", "mlPromptScore > 0.15", "mlPromptScore > 0.15"] # loosest
-      - ["mlPromptScore > 0.9", "mlPromptScore > 0.7", "mlPromptScore > 0.7", "mlPromptScore > 0.6", "mlPromptScore > 0.6", "mlPromptScore > 0.6", "mlPromptScore > 0.6", "mlPromptScore > 0.3", "mlPromptScore > 0.3"] # loose
+      - ["mlPromptScore > 0.961", "mlPromptScore > 0.83", "mlPromptScore > 0.84", "mlPromptScore > 0.74", "mlPromptScore > 0.74", "mlPromptScore > 0.62", "mlPromptScore > 0.63", "mlPromptScore > 0.15", "mlPromptScore > 0.15"] # loose
       - ["mlPromptScore > 0.98", "mlPromptScore > 0.9", "mlPromptScore > 0.9", "mlPromptScore > 0.85", "mlPromptScore > 0.85", "mlPromptScore > 0.8", "mlPromptScore > 0.8", "mlPromptScore > 0.6", "mlPromptScore > 0.6"] # tight 2
       - ["mlPromptScore > 0.97", "mlPromptScore > 0.9", "mlPromptScore > 0.9", "mlPromptScore > 0.85", "mlPromptScore > 0.85", "mlPromptScore > 0.8", "mlPromptScore > 0.8", "mlPromptScore > 0.6", "mlPromptScore > 0.6"] # tight 4
-      - ["mlPromptScore > 0.98", "mlPromptScore > 0.95", "mlPromptScore > 0.95", "mlPromptScore > 0.9", "mlPromptScore > 0.9", "mlPromptScore > 0.9", "mlPromptScore > 0.9", "mlPromptScore > 0.7", "mlPromptScore > 0.7"] # tight
+      - ["mlPromptScore > 0.978", "mlPromptScore > 0.94", "mlPromptScore > 0.937", "mlPromptScore > 0.915", "mlPromptScore > 0.91", "mlPromptScore > 0.89", "mlPromptScore > 0.88", "mlPromptScore > 0.85", "mlPromptScore > 0.85"] # tight